
src/hotspot/share/opto/library_call.cpp


   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"

  27 #include "ci/ciUtilities.inline.hpp"
  28 #include "classfile/vmIntrinsics.hpp"
  29 #include "compiler/compileBroker.hpp"
  30 #include "compiler/compileLog.hpp"
  31 #include "gc/shared/barrierSet.hpp"
  32 #include "jfr/support/jfrIntrinsics.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "oops/klass.inline.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "opto/addnode.hpp"
  37 #include "opto/arraycopynode.hpp"
  38 #include "opto/c2compiler.hpp"
  39 #include "opto/castnode.hpp"
  40 #include "opto/cfgnode.hpp"
  41 #include "opto/convertnode.hpp"
  42 #include "opto/countbitsnode.hpp"
  43 #include "opto/idealKit.hpp"
  44 #include "opto/library_call.hpp"
  45 #include "opto/mathexactnode.hpp"
  46 #include "opto/mulnode.hpp"

 300   case vmIntrinsics::_indexOfUL:                return inline_string_indexOf(StrIntrinsicNode::UL);
 301   case vmIntrinsics::_indexOfIL:                return inline_string_indexOfI(StrIntrinsicNode::LL);
 302   case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
 303   case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
 304   case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar(StrIntrinsicNode::U);
 305   case vmIntrinsics::_indexOfL_char:            return inline_string_indexOfChar(StrIntrinsicNode::L);
 306 
 307   case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);
 308   case vmIntrinsics::_equalsU:                  return inline_string_equals(StrIntrinsicNode::UU);
 309 
 310   case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
 311   case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
 312   case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
 313   case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);
 314 
 315   case vmIntrinsics::_compressStringC:
 316   case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
 317   case vmIntrinsics::_inflateStringC:
 318   case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
 319 


 320   case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false);
 321   case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN,  Relaxed, false);
 322   case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,     Relaxed, false);
 323   case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,    Relaxed, false);
 324   case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,     Relaxed, false);
 325   case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,      Relaxed, false);
 326   case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,     Relaxed, false);
 327   case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,    Relaxed, false);
 328   case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,   Relaxed, false);

 329 
 330   case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false);
 331   case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN,  Relaxed, false);
 332   case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,     Relaxed, false);
 333   case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,    Relaxed, false);
 334   case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,     Relaxed, false);
 335   case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,      Relaxed, false);
 336   case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,     Relaxed, false);
 337   case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,    Relaxed, false);
 338   case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,   Relaxed, false);

 339 
 340   case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,   Volatile, false);
 341   case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN,  Volatile, false);
 342   case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,     Volatile, false);
 343   case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,    Volatile, false);
 344   case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,     Volatile, false);
 345   case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,      Volatile, false);
 346   case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,     Volatile, false);
 347   case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,    Volatile, false);
 348   case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,   Volatile, false);
 349 
 350   case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,   Volatile, false);
 351   case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN,  Volatile, false);
 352   case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,     Volatile, false);
 353   case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,    Volatile, false);
 354   case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,     Volatile, false);
 355   case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,      Volatile, false);
 356   case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,     Volatile, false);
 357   case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,    Volatile, false);
 358   case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,   Volatile, false);

 492   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 493   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 494   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 495   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 496   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 497 
 498   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 499   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);
 500 
 501   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
 502 
 503   case vmIntrinsics::_isInstance:
 504   case vmIntrinsics::_getModifiers:
 505   case vmIntrinsics::_isInterface:
 506   case vmIntrinsics::_isArray:
 507   case vmIntrinsics::_isPrimitive:
 508   case vmIntrinsics::_isHidden:
 509   case vmIntrinsics::_getSuperclass:
 510   case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());
 511 





 512   case vmIntrinsics::_floatToRawIntBits:
 513   case vmIntrinsics::_floatToIntBits:
 514   case vmIntrinsics::_intBitsToFloat:
 515   case vmIntrinsics::_doubleToRawLongBits:
 516   case vmIntrinsics::_doubleToLongBits:
 517   case vmIntrinsics::_longBitsToDouble:
 518   case vmIntrinsics::_floatToFloat16:
 519   case vmIntrinsics::_float16ToFloat:           return inline_fp_conversions(intrinsic_id());
 520 
 521   case vmIntrinsics::_floatIsFinite:
 522   case vmIntrinsics::_floatIsInfinite:
 523   case vmIntrinsics::_doubleIsFinite:
 524   case vmIntrinsics::_doubleIsInfinite:         return inline_fp_range_check(intrinsic_id());
 525 
 526   case vmIntrinsics::_numberOfLeadingZeros_i:
 527   case vmIntrinsics::_numberOfLeadingZeros_l:
 528   case vmIntrinsics::_numberOfTrailingZeros_i:
 529   case vmIntrinsics::_numberOfTrailingZeros_l:
 530   case vmIntrinsics::_bitCount_i:
 531   case vmIntrinsics::_bitCount_l:

1972     return Type::AnyPtr;
1973   } else if (base_type == TypePtr::NULL_PTR) {
1974     // Since this is a NULL+long form, we have to switch to a rawptr.
1975     base   = _gvn.transform(new CastX2PNode(offset));
1976     offset = MakeConX(0);
1977     return Type::RawPtr;
1978   } else if (base_type->base() == Type::RawPtr) {
1979     return Type::RawPtr;
1980   } else if (base_type->isa_oopptr()) {
1981     // Base is never null => always a heap address.
1982     if (!TypePtr::NULL_PTR->higher_equal(base_type)) {
1983       return Type::OopPtr;
1984     }
1985     // Offset is small => always a heap address.
1986     const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
1987     if (offset_type != NULL &&
1988         base_type->offset() == 0 &&     // (should always be?)
1989         offset_type->_lo >= 0 &&
1990         !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
1991       return Type::OopPtr;
1992     } else if (type == T_OBJECT) {
1993     // Off-heap access to an oop doesn't make any sense. It has to be
1994     // on-heap.
1995       return Type::OopPtr;
1996     }
1997     // Otherwise, it might either be oop+off or NULL+addr.
1998     return Type::AnyPtr;
1999   } else {
2000     // No information:
2001     return Type::AnyPtr;
2002   }
2003 }
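The classification above encodes the two addressing modes Unsafe callers can use. A minimal standalone sketch of that model, in plain C++ with illustrative names only (not HotSpot API):

#include <cstdint>

// Unsafe-style address formation: with a non-null base the offset is a byte
// offset into that object (OopPtr); with a null base the "offset" already is
// an absolute native address (RawPtr); when neither is known statically the
// classification stays AnyPtr and both forms must be handled at runtime.
static inline void* unsafe_address_sketch(void* base, intptr_t offset) {
  if (base == nullptr) {
    return reinterpret_cast<void*>(offset);   // NULL + long  ==> raw native pointer
  }
  return static_cast<char*>(base) + offset;   // oop  + long  ==> on-heap address
}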
2004 
2005 Node* LibraryCallKit::make_unsafe_address(Node*& base, Node* offset, BasicType type, bool can_cast) {
2006   Node* uncasted_base = base;
2007   int kind = classify_unsafe_addr(uncasted_base, offset, type);
2008   if (kind == Type::RawPtr) {
2009     return basic_plus_adr(top(), uncasted_base, offset);
2010   } else if (kind == Type::AnyPtr) {
2011     assert(base == uncasted_base, "unexpected base change");
2012     if (can_cast) {

2151     case vmIntrinsics::_remainderUnsigned_l: {
2152       zero_check_long(argument(2));
2153       // Compile-time detection of the zero-divisor exception
2154       if (stopped()) {
2155         return true; // keep the graph constructed so far
2156       }
2157       n = new UModLNode(control(), argument(0), argument(2));
2158       break;
2159     }
2160     default:  fatal_unexpected_iid(id);  break;
2161   }
2162   set_result(_gvn.transform(n));
2163   return true;
2164 }
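For reference, the Java-level behavior being intrinsified by the unsigned divide/remainder cases above (divisor zero-checked before the operation) can be sketched in plain C++; this is a semantic sketch, not the node graph the intrinsic builds:

#include <cstdint>
#include <stdexcept>

// Long.remainderUnsigned semantics with the up-front zero check; the UModLNode
// built above computes the same value, with zero_check_long() covering the
// exceptional path instead of a C++ throw.
static inline uint64_t remainder_unsigned_l(uint64_t dividend, uint64_t divisor) {
  if (divisor == 0) {
    throw std::domain_error("/ by zero");
  }
  return dividend % divisor;
}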
2165 
2166 //----------------------------inline_unsafe_access----------------------------
2167 
2168 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2169   // Attempt to infer a sharper value type from the offset and base type.
2170   ciKlass* sharpened_klass = NULL;

2171 
2172   // See if it is an instance field, with an object type.
2173   if (alias_type->field() != NULL) {
2174     if (alias_type->field()->type()->is_klass()) {
2175       sharpened_klass = alias_type->field()->type()->as_klass();

2176     }
2177   }
2178 
2179   const TypeOopPtr* result = NULL;
2180   // See if it is a narrow oop array.
2181   if (adr_type->isa_aryptr()) {
2182     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2183       const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();

2184       if (elem_type != NULL && elem_type->is_loaded()) {
2185         // Sharpen the value type.
2186         result = elem_type;
2187       }
2188     }
2189   }
2190 
2191   // The sharpened class might be unloaded if there is no class loader
2192   // constraint in place.
2193   if (result == NULL && sharpened_klass != NULL && sharpened_klass->is_loaded()) {
2194     // Sharpen the value type.
2195     result = TypeOopPtr::make_from_klass(sharpened_klass);



2196   }
2197   if (result != NULL) {
2198 #ifndef PRODUCT
2199     if (C->print_intrinsics() || C->print_inlining()) {
2200       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2201       tty->print("  sharpened value: ");  result->dump();    tty->cr();
2202     }
2203 #endif
2204   }
2205   return result;
2206 }
2207 
2208 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2209   switch (kind) {
2210       case Relaxed:
2211         return MO_UNORDERED;
2212       case Opaque:
2213         return MO_RELAXED;
2214       case Acquire:
2215         return MO_ACQUIRE;

2230   guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2231   assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2232 
2233   if (is_reference_type(type)) {
2234     decorators |= ON_UNKNOWN_OOP_REF;
2235   }
2236 
2237   if (unaligned) {
2238     decorators |= C2_UNALIGNED;
2239   }
2240 
2241 #ifndef PRODUCT
2242   {
2243     ResourceMark rm;
2244     // Check the signatures.
2245     ciSignature* sig = callee()->signature();
2246 #ifdef ASSERT
2247     if (!is_store) {
2248       // Object getReference(Object base, int/long offset), etc.
2249       BasicType rtype = sig->return_type()->basic_type();
2250       assert(rtype == type, "getter must return the expected value");
2251       assert(sig->count() == 2, "oop getter has 2 arguments");
2252       assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2253       assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2254     } else {
2255       // void putReference(Object base, int/long offset, Object x), etc.
2256       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2257       assert(sig->count() == 3, "oop putter has 3 arguments");
2258       assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2259       assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2260       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2261       assert(vtype == type, "putter must accept the expected value");
2262     }
2263 #endif // ASSERT
2264  }
2265 #endif //PRODUCT
2266 
2267   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2268 
2269   Node* receiver = argument(0);  // type: oop
2270 
2271   // Build address expression.
2272   Node* heap_base_oop = top();
2273 
2274   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2275   Node* base = argument(1);  // type: oop
2276   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2277   Node* offset = argument(2);  // type: long
2278   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2279   // to be plain byte offsets, which are also the same as those accepted
2280   // by oopDesc::field_addr.
2281   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2282          "fieldOffset must be byte-scaled");























































2283   // 32-bit machines ignore the high half!
2284   offset = ConvL2X(offset);
2285 
2286   // Save state and restore on bailout
2287   uint old_sp = sp();
2288   SafePointNode* old_map = clone_map();
2289 
2290   Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2291 
2292   if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
2293     if (type != T_OBJECT) {
2294       decorators |= IN_NATIVE; // off-heap primitive access
2295     } else {
2296       set_map(old_map);
2297       set_sp(old_sp);
2298       return false; // off-heap oop accesses are not supported
2299     }
2300   } else {
2301     heap_base_oop = base; // on-heap or mixed access
2302   }
2303 
2304   // Can base be NULL? Otherwise, always on-heap access.
2305   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2306 
2307   if (!can_access_non_heap) {
2308     decorators |= IN_HEAP;
2309   }
2310 
2311   Node* val = is_store ? argument(4) : NULL;
2312 
2313   const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2314   if (adr_type == TypePtr::NULL_PTR) {
2315     set_map(old_map);
2316     set_sp(old_sp);
2317     return false; // off-heap access with zero address
2318   }
2319 
2320   // Try to categorize the address.
2321   Compile::AliasType* alias_type = C->alias_type(adr_type);
2322   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2323 
2324   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2325       alias_type->adr_type() == TypeAryPtr::RANGE) {
2326     set_map(old_map);
2327     set_sp(old_sp);
2328     return false; // not supported
2329   }
2330 
2331   bool mismatched = false;
2332   BasicType bt = alias_type->basic_type();
























2333   if (bt != T_ILLEGAL) {
2334     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");



2335     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2336       // Alias type doesn't differentiate between byte[] and boolean[].
2337       // Use address type to get the element type.
2338       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2339     }
2340     if (is_reference_type(bt, true)) {
2341       // accessing an array field with getReference is not a mismatch
2342       bt = T_OBJECT;
2343     }
2344     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2345       // Don't intrinsify mismatched object accesses
2346       set_map(old_map);
2347       set_sp(old_sp);
2348       return false;
2349     }
2350     mismatched = (bt != type);
2351   } else if (alias_type->adr_type()->isa_oopptr()) {
2352     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2353   }
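  // Example of a mismatched (but still handled) access: reading a long field
  // through Unsafe.getInt() gives bt == T_LONG and type == T_INT, so
  // 'mismatched' is set and C2_MISMATCHED is added to the decorators below.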
2354 























2355   old_map->destruct(&_gvn);
2356   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2357 
2358   if (mismatched) {
2359     decorators |= C2_MISMATCHED;
2360   }
2361 
2362   // First guess at the value type.
2363   const Type *value_type = Type::get_const_basic_type(type);
2364 
2365   // Figure out the memory ordering.
2366   decorators |= mo_decorator_for_access_kind(kind);
2367 
2368   if (!is_store && type == T_OBJECT) {
2369     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2370     if (tjp != NULL) {
2371       value_type = tjp;




2372     }
2373   }
2374 
2375   receiver = null_check(receiver);
2376   if (stopped()) {
2377     return true;
2378   }
2379   // Heap pointers get a null-check from the interpreter,
2380   // as a courtesy.  However, this is not guaranteed by Unsafe,
2381   // and it is not possible to fully distinguish unintended nulls
2382   // from intended ones in this API.
2383 
2384   if (!is_store) {
2385     Node* p = NULL;
2386     // Try to constant fold a load from a constant field
2387     ciField* field = alias_type->field();
2388     if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) {
2389       // final or stable field
2390       p = make_constant_from_field(field, heap_base_oop);
2391     }
2392 
2393     if (p == NULL) { // Could not constant fold the load
2394       p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);















2395       // Normalize the value returned by getBoolean in the following cases
2396       if (type == T_BOOLEAN &&
2397           (mismatched ||
2398            heap_base_oop == top() ||                  // - heap_base_oop is NULL or
2399            (can_access_non_heap && field == NULL))    // - heap_base_oop is potentially NULL
2400                                                       //   and the unsafe access is made at a large offset
2401                                                       //   (i.e., larger than the maximum offset necessary for any
2402                                                       //   field access)
2403             ) {
2404           IdealKit ideal = IdealKit(this);
2405 #define __ ideal.
2406           IdealVariable normalized_result(ideal);
2407           __ declarations_done();
2408           __ set(normalized_result, p);
2409           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2410           __ set(normalized_result, ideal.ConI(1));
2411           ideal.end_if();
2412           final_sync(ideal);
2413           p = __ value(normalized_result);
2414 #undef __
2415       }
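      // The IdealKit block above is equivalent to the scalar expression
      //   p = (p != 0) ? 1 : 0;
      // i.e. any non-zero byte seen by getBoolean() is canonicalized to 1.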
2416     }
2417     if (type == T_ADDRESS) {
2418       p = gvn().transform(new CastP2XNode(NULL, p));
2419       p = ConvX2UL(p);
2420     }
2421     // The load node has the control of the preceding MemBarCPUOrder.  All
2422     // following nodes will have the control of the MemBarCPUOrder inserted at
2423     // the end of this method.  So, pushing the load onto the stack at a later
2424     // point is fine.
2425     set_result(p);
2426   } else {
2427     if (bt == T_ADDRESS) {
2428       // Repackage the long as a pointer.
2429       val = ConvL2X(val);
2430       val = gvn().transform(new CastX2PNode(val));
2431     }
2432     access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
















2433   }
2434 
2435   return true;
2436 }
2437 








































2438 //----------------------------inline_unsafe_load_store----------------------------
2439 // This method serves a couple of different customers (depending on LoadStoreKind):
2440 //
2441 // LS_cmp_swap:
2442 //
2443 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2444 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2445 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2446 //
2447 // LS_cmp_swap_weak:
2448 //
2449 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2450 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2451 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2452 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2453 //
2454 //   boolean weakCompareAndSetInt(          Object o, long offset, int    expected, int    x);
2455 //   boolean weakCompareAndSetIntPlain(     Object o, long offset, int    expected, int    x);
2456 //   boolean weakCompareAndSetIntAcquire(   Object o, long offset, int    expected, int    x);
2457 //   boolean weakCompareAndSetIntRelease(   Object o, long offset, int    expected, int    x);
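The contract of the compare-and-set/exchange families listed above maps closely onto C++'s std::atomic compare-exchange operations; a plain C++ sketch of the correspondence (illustrative only, not how the intrinsic is implemented):

#include <atomic>

// compareAndSetInt-style semantics: atomically install x iff *addr currently
// holds 'expected'; the boolean result reports whether the swap happened.
// The weakCompareAndSet* variants correspond to compare_exchange_weak, which
// is additionally allowed to fail spuriously.
static inline bool compare_and_set_int(std::atomic<int>* addr, int expected, int x) {
  return addr->compare_exchange_strong(expected, x);
}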

2623     }
2624     case LS_cmp_swap:
2625     case LS_cmp_swap_weak:
2626     case LS_get_add:
2627       break;
2628     default:
2629       ShouldNotReachHere();
2630   }
2631 
2632   // Null check receiver.
2633   receiver = null_check(receiver);
2634   if (stopped()) {
2635     return true;
2636   }
2637 
2638   int alias_idx = C->get_alias_index(adr_type);
2639 
2640   if (is_reference_type(type)) {
2641     decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2642 













2643     // Transformation of a value which could be NULL pointer (CastPP #NULL)
2644     // could be delayed during Parse (for example, in adjust_map_after_if()).
2645     // Execute transformation here to avoid barrier generation in such case.
2646     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2647       newval = _gvn.makecon(TypePtr::NULL_PTR);
2648 
2649     if (oldval != NULL && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2650       // Refine the value to a null constant, when it is known to be null
2651       oldval = _gvn.makecon(TypePtr::NULL_PTR);
2652     }
2653   }
2654 
2655   Node* result = NULL;
2656   switch (kind) {
2657     case LS_cmp_exchange: {
2658       result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2659                                             oldval, newval, value_type, type, decorators);
2660       break;
2661     }
2662     case LS_cmp_swap_weak:

2784   Node* cls = null_check(argument(1));
2785   if (stopped())  return true;
2786 
2787   Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
2788   kls = null_check(kls);
2789   if (stopped())  return true;  // argument was like int.class
2790 
2791   Node* test = NULL;
2792   if (LibraryCallKit::klass_needs_init_guard(kls)) {
2793     // Note:  The argument might still be an illegal value like
2794     // Serializable.class or Object[].class.   The runtime will handle it.
2795     // But we must make an explicit check for initialization.
2796     Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
2797     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
2798     // can generate code to load it as an unsigned byte.
2799     Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
2800     Node* bits = intcon(InstanceKlass::fully_initialized);
2801     test = _gvn.transform(new SubINode(inst, bits));
2802     // The 'test' is non-zero if we need to take a slow path.
2803   }
2804 
2805   Node* obj = new_instance(kls, test);





2806   set_result(obj);
2807   return true;
2808 }
2809 
2810 //------------------------inline_native_time_funcs--------------
2811 // inline code for System.currentTimeMillis() and System.nanoTime()
2812 // these have the same type and signature
2813 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
2814   const TypeFunc* tf = OptoRuntime::void_long_Type();
2815   const TypePtr* no_memory_effects = NULL;
2816   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
2817   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
2818 #ifdef ASSERT
2819   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
2820   assert(value_top == top(), "second value must be top");
2821 #endif
2822   set_result(value);
2823   return true;
2824 }
2825 

3365 
3366   Node* thread = _gvn.transform(new ThreadLocalNode());
3367   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
3368   // We cannot use immutable_memory() because we might flip onto a
3369   // different carrier thread, at which point we'll need to use that
3370   // carrier thread's cache.
3371   // return _gvn.transform(LoadNode::make(_gvn, NULL, immutable_memory(), p, p->bottom_type()->is_ptr(),
3372   //       TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3373   return make_load(NULL, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
3374 }
3375 
3376 //------------------------inline_native_scopedValueCache------------------
3377 bool LibraryCallKit::inline_native_scopedValueCache() {
3378   ciKlass *objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3379   const TypeOopPtr *etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3380   const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
3381 
3382   // Because we create the scopedValue cache lazily we have to make the
3383   // type of the result BotPTR.
3384   bool xk = etype->klass_is_exact();
3385   const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
3386   Node* cache_obj_handle = scopedValueCache_helper();
3387   set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3388 
3389   return true;
3390 }
3391 
3392 //------------------------inline_native_setScopedValueCache------------------
3393 bool LibraryCallKit::inline_native_setScopedValueCache() {
3394   Node* arr = argument(0);
3395   Node* cache_obj_handle = scopedValueCache_helper();
3396 
3397   const TypePtr *adr_type = _gvn.type(cache_obj_handle)->isa_ptr();
3398   store_to_memory(control(), cache_obj_handle, arr, T_OBJECT, adr_type,
3399                   MemNode::unordered);
3400 
3401   return true;
3402 }
3403 
3404 //---------------------------load_mirror_from_klass----------------------------
3405 // Given a klass oop, load its java mirror (a java.lang.Class oop).
3406 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
3407   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
3408   Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3409   // mirror = ((OopHandle)mirror)->resolve();
3410   return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
3411 }
3412 
3413 //-----------------------load_klass_from_mirror_common-------------------------
3414 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3415 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3416 // and branch to the given path on the region.
3417 // If never_see_null, take an uncommon trap on null, so we can optimistically
3418 // compile for the non-null case.
3419 // If the region is NULL, force never_see_null = true.
3420 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3421                                                     bool never_see_null,
3422                                                     RegionNode* region,
3423                                                     int null_path,
3424                                                     int offset) {
3425   if (region == NULL)  never_see_null = true;
3426   Node* p = basic_plus_adr(mirror, offset);
3427   const TypeKlassPtr*  kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3428   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3429   Node* null_ctl = top();
3430   kls = null_check_oop(kls, &null_ctl, never_see_null);
3431   if (region != NULL) {
3432     // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).

3435     assert(null_ctl == top(), "no loose ends");
3436   }
3437   return kls;
3438 }
3439 
3440 //--------------------(inline_native_Class_query helpers)---------------------
3441 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
3442 // Fall through if (mods & mask) == bits, take the guard otherwise.
3443 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3444   // Branch around if the given klass has the given modifier bit set.
3445   // Like generate_guard, adds a new path onto the region.
3446   Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3447   Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered);
3448   Node* mask = intcon(modifier_mask);
3449   Node* bits = intcon(modifier_bits);
3450   Node* mbit = _gvn.transform(new AndINode(mods, mask));
3451   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
3452   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3453   return generate_fair_guard(bol, region);
3454 }

3455 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3456   return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3457 }
3458 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3459   return generate_access_flags_guard(kls, JVM_ACC_IS_HIDDEN_CLASS, 0, region);
3460 }
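The fall-through condition documented above ("fall through if (mods & mask) == bits") reduces to a simple mask test. A standalone sketch; JVM_ACC_INTERFACE's value comes from the class-file format, the helper names are illustrative:

static const int JVM_ACC_INTERFACE_SKETCH = 0x0200;  // ACC_INTERFACE per the class-file spec

// Mirrors generate_access_flags_guard: fall through when (mods & mask) == bits,
// take the guard path otherwise.
static inline bool access_flags_guard_taken(int access_flags, int mask, int bits) {
  return (access_flags & mask) != bits;
}

// generate_interface_guard(kls, region) above corresponds to
//   access_flags_guard_taken(flags, JVM_ACC_INTERFACE, 0)
// i.e. the guard fires exactly when the klass is an interface.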
3461 
3462 //-------------------------inline_native_Class_query-------------------
3463 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3464   const Type* return_type = TypeInt::BOOL;
3465   Node* prim_return_value = top();  // what happens if it's a primitive class?
3466   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3467   bool expect_prim = false;     // most of these guys expect to work on refs
3468 
3469   enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3470 
3471   Node* mirror = argument(0);
3472   Node* obj    = top();
3473 
3474   switch (id) {

3628 
3629   case vmIntrinsics::_getClassAccessFlags:
3630     p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3631     query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
3632     break;
3633 
3634   default:
3635     fatal_unexpected_iid(id);
3636     break;
3637   }
3638 
3639   // Fall-through is the normal case of a query to a real class.
3640   phi->init_req(1, query_value);
3641   region->init_req(1, control());
3642 
3643   C->set_has_split_ifs(true); // Has chance for split-if optimization
3644   set_result(region, phi);
3645   return true;
3646 }
3647 































3648 //-------------------------inline_Class_cast-------------------
3649 bool LibraryCallKit::inline_Class_cast() {
3650   Node* mirror = argument(0); // Class
3651   Node* obj    = argument(1);
3652   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3653   if (mirror_con == NULL) {
3654     return false;  // dead path (mirror->is_top()).
3655   }
3656   if (obj == NULL || obj->is_top()) {
3657     return false;  // dead path
3658   }
3659   const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
3660 
3661   // First, see if Class.cast() can be folded statically.
3662   // java_mirror_type() returns non-null for compile-time Class constants.
3663   ciType* tm = mirror_con->java_mirror_type();

3664   if (tm != NULL && tm->is_klass() &&
3665       tp != NULL) {
3666     if (!tp->is_loaded()) {
3667       // Don't use intrinsic when class is not loaded.
3668       return false;
3669     } else {
3670       int static_res = C->static_subtype_check(TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces), tp->as_klass_type());
3671       if (static_res == Compile::SSC_always_true) {
3672         // isInstance() is true - fold the code.



3673         set_result(obj);
3674         return true;
3675       } else if (static_res == Compile::SSC_always_false) {
3676         // Don't use intrinsic, have to throw ClassCastException.
3677         // If the reference is null, the non-intrinsic bytecode will
3678         // be optimized appropriately.
3679         return false;
3680       }
3681     }
3682   }
3683 
3684   // Bailout intrinsic and do normal inlining if exception path is frequent.
3685   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
3686     return false;
3687   }
3688 
3689   // Generate dynamic checks.
3690   // Class.cast() is java implementation of _checkcast bytecode.
3691   // Do checkcast (Parse::do_checkcast()) optimizations here.
3692 



3693   mirror = null_check(mirror);
3694   // If mirror is dead, only null-path is taken.
3695   if (stopped()) {
3696     return true;
3697   }
3698 
3699   // Not-subtype or the mirror's klass ptr is NULL (in case it is a primitive).
3700   enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
3701   RegionNode* region = new RegionNode(PATH_LIMIT);
3702   record_for_igvn(region);
3703 
3704   // Now load the mirror's klass metaobject, and null-check it.
3705   // If kls is null, we have a primitive mirror and
3706   // nothing is an instance of a primitive type.
3707   Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
3708 
3709   Node* res = top();


3710   if (!stopped()) {





















3711     Node* bad_type_ctrl = top();
3712     // Do checkcast optimizations.
3713     res = gen_checkcast(obj, kls, &bad_type_ctrl);
3714     region->init_req(_bad_type_path, bad_type_ctrl);
3715   }
3716   if (region->in(_prim_path) != top() ||
3717       region->in(_bad_type_path) != top()) {

3718     // Let Interpreter throw ClassCastException.
3719     PreserveJVMState pjvms(this);
3720     set_control(_gvn.transform(region));



3721     uncommon_trap(Deoptimization::Reason_intrinsic,
3722                   Deoptimization::Action_maybe_recompile);
3723   }
3724   if (!stopped()) {
3725     set_result(res);
3726   }
3727   return true;
3728 }
3729 
3730 
3731 //--------------------------inline_native_subtype_check------------------------
3732 // This intrinsic takes the JNI calls out of the heart of
3733 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
3734 bool LibraryCallKit::inline_native_subtype_check() {
3735   // Pull both arguments off the stack.
3736   Node* args[2];                // two java.lang.Class mirrors: superc, subc
3737   args[0] = argument(0);
3738   args[1] = argument(1);
3739   Node* klasses[2];             // corresponding Klasses: superk, subk
3740   klasses[0] = klasses[1] = top();
3741 
3742   enum {
3743     // A full decision tree on {superc is prim, subc is prim}:
3744     _prim_0_path = 1,           // {P,N} => false
3745                                 // {P,P} & superc!=subc => false
3746     _prim_same_path,            // {P,P} & superc==subc => true
3747     _prim_1_path,               // {N,P} => false
3748     _ref_subtype_path,          // {N,N} & subtype check wins => true
3749     _both_ref_path,             // {N,N} & subtype check loses => false
3750     PATH_LIMIT
3751   };
3752 
3753   RegionNode* region = new RegionNode(PATH_LIMIT);

3754   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
3755   record_for_igvn(region);

3756 
3757   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
3758   const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3759   int class_klass_offset = java_lang_Class::klass_offset();
3760 
3761   // First null-check both mirrors and load each mirror's klass metaobject.
3762   int which_arg;
3763   for (which_arg = 0; which_arg <= 1; which_arg++) {
3764     Node* arg = args[which_arg];
3765     arg = null_check(arg);
3766     if (stopped())  break;
3767     args[which_arg] = arg;
3768 
3769     Node* p = basic_plus_adr(arg, class_klass_offset);
3770     Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
3771     klasses[which_arg] = _gvn.transform(kls);
3772   }
3773 
3774   // Having loaded both klasses, test each for null.
3775   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3776   for (which_arg = 0; which_arg <= 1; which_arg++) {
3777     Node* kls = klasses[which_arg];
3778     Node* null_ctl = top();
3779     kls = null_check_oop(kls, &null_ctl, never_see_null);
3780     int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
3781     region->init_req(prim_path, null_ctl);



3782     if (stopped())  break;
3783     klasses[which_arg] = kls;
3784   }
3785 
3786   if (!stopped()) {
3787     // now we have two reference types, in klasses[0..1]
3788     Node* subk   = klasses[1];  // the argument to isAssignableFrom
3789     Node* superk = klasses[0];  // the receiver
3790     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));



3791     // now we have a successful reference subtype check
3792     region->set_req(_ref_subtype_path, control());
3793   }
3794 
3795   // If both operands are primitive (both klasses null), then
3796   // we must return true when they are identical primitives.
3797   // It is convenient to test this after the first null klass check.
3798   set_control(region->in(_prim_0_path)); // go back to first null check

3799   if (!stopped()) {
3800     // Since superc is primitive, make a guard for the superc==subc case.
3801     Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
3802     Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
3803     generate_guard(bol_eq, region, PROB_FAIR);
3804     if (region->req() == PATH_LIMIT+1) {
3805       // A guard was added.  If the added guard is taken, superc==subc.
3806       region->swap_edges(PATH_LIMIT, _prim_same_path);
3807       region->del_req(PATH_LIMIT);
3808     }
3809     region->set_req(_prim_0_path, control()); // Not equal after all.
3810   }
3811 
3812   // these are the only paths that produce 'true':
3813   phi->set_req(_prim_same_path,   intcon(1));
3814   phi->set_req(_ref_subtype_path, intcon(1));
3815 
3816   // pull together the cases:
3817   assert(region->req() == PATH_LIMIT, "sane region");
3818   for (uint i = 1; i < region->req(); i++) {
3819     Node* ctl = region->in(i);
3820     if (ctl == NULL || ctl == top()) {
3821       region->set_req(i, top());
3822       phi   ->set_req(i, top());
3823     } else if (phi->in(i) == NULL) {
3824       phi->set_req(i, intcon(0)); // all other paths produce 'false'
3825     }
3826   }
3827 
3828   set_control(_gvn.transform(region));
3829   set_result(_gvn.transform(phi));
3830   return true;
3831 }
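The path enum above encodes the following decision procedure. Sketched in plain C++; Mirror, is_primitive_mirror, klass_of and is_subtype_of are stand-ins declared only for the sketch, not HotSpot API:

struct Klass;
struct Mirror;

// Assumed helpers, declared only for the sketch; they stand in for the
// mirror/klass loads and null checks the intrinsic performs.
bool   is_primitive_mirror(const Mirror* m);
Klass* klass_of(const Mirror* m);
bool   is_subtype_of(const Klass* subk, const Klass* superk);

// superc.isAssignableFrom(subc), following the path enum above:
//   {P,N} / {N,P}            -> false   (_prim_0_path / _prim_1_path)
//   {P,P} with superc==subc  -> true    (_prim_same_path)
//   {N,N}                    -> subtype check (_ref_subtype_path / _both_ref_path)
static bool is_assignable_from(const Mirror* superc, const Mirror* subc) {
  if (is_primitive_mirror(superc) || is_primitive_mirror(subc)) {
    return superc == subc;   // only identical primitive mirrors pass
  }
  return is_subtype_of(klass_of(subc), klass_of(superc));
}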
3832 
3833 //---------------------generate_array_guard_common------------------------
3834 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
3835                                                   bool obj_array, bool not_array) {
3836 
3837   if (stopped()) {
3838     return NULL;
3839   }
3840 
3841   // If obj_array/not_array==false/false:
3842   // Branch around if the given klass is in fact an array (either obj or prim).
3843   // If obj_array/not_array==false/true:
3844   // Branch around if the given klass is not an array klass of any kind.
3845   // If obj_array/not_array==true/true:
3846   // Branch around if the kls is not an oop array (kls is int[], String, etc.)
3847   // If obj_array/not_array==true/false:
3848   // Branch around if the kls is an oop array (Object[] or subtype)
3849   //
3850   // Like generate_guard, adds a new path onto the region.
3851   jint  layout_con = 0;
3852   Node* layout_val = get_layout_helper(kls, layout_con);
3853   if (layout_val == NULL) {
3854     bool query = (obj_array
3855                   ? Klass::layout_helper_is_objArray(layout_con)
3856                   : Klass::layout_helper_is_array(layout_con));
3857     if (query == not_array) {







3858       return NULL;                       // never a branch
3859     } else {                             // always a branch
3860       Node* always_branch = control();
3861       if (region != NULL)
3862         region->add_req(always_branch);
3863       set_control(top());
3864       return always_branch;
3865     }
3866   }





















3867   // Now test the correct condition.
3868   jint  nval = (obj_array
3869                 ? (jint)(Klass::_lh_array_tag_type_value
3870                    <<    Klass::_lh_array_tag_shift)
3871                 : Klass::_lh_neutral_value);
3872   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
3873   BoolTest::mask btest = BoolTest::lt;  // correct for testing is_[obj]array
3874   // invert the test if we are looking for a non-array
3875   if (not_array)  btest = BoolTest(btest).negate();
3876   Node* bol = _gvn.transform(new BoolNode(cmp, btest));
3877   return generate_fair_guard(bol, region);
3878 }
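A standalone restatement of the layout-helper comparison generated above. The encoding assumed here (two array-tag bits in the most significant positions, type-array tag 0x3, obj-array tag 0x2, _lh_neutral_value == 0, so every array layout helper is negative) follows Klass::layout_helper's documented scheme:

#include <cstdint>

static const int32_t lh_type_array_tagged = int32_t(0x3u << 30);  // 0xC0000000, assumed tag placement

// is_array:    any array layout helper is negative (below _lh_neutral_value == 0).
// is_objArray: obj-array tagged values sort below the type-array tag under the
//              signed compare, which is exactly the BoolTest::lt test above.
static inline bool lh_is_array(int32_t lh)      { return lh < 0; }
static inline bool lh_is_obj_array(int32_t lh)  { return lh < lh_type_array_tagged; }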
3879 
3880 
3881 //-----------------------inline_native_newArray--------------------------
3882 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
3883 // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
3884 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
3885   Node* mirror;
3886   Node* count_val;
3887   if (uninitialized) {
3888     mirror    = argument(1);
3889     count_val = argument(2);
3890   } else {
3891     mirror    = argument(0);
3892     count_val = argument(1);
3893   }
3894 
3895   mirror = null_check(mirror);
3896   // If mirror or obj is dead, only null-path is taken.
3897   if (stopped())  return true;
3898 
3899   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
3900   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
3901   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
3902   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);

4007   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4008   { PreserveReexecuteState preexecs(this);
4009     jvms()->set_should_reexecute(true);
4010 
4011     array_type_mirror = null_check(array_type_mirror);
4012     original          = null_check(original);
4013 
4014     // Check if a null path was taken unconditionally.
4015     if (stopped())  return true;
4016 
4017     Node* orig_length = load_array_length(original);
4018 
4019     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
4020     klass_node = null_check(klass_node);
4021 
4022     RegionNode* bailout = new RegionNode(1);
4023     record_for_igvn(bailout);
4024 
4025     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4026     // Bail out if that is so.
4027     Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);












4028     if (not_objArray != NULL) {
4029       // Improve the klass node's type from the new optimistic assumption:
4030       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4031       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
4032       Node* cast = new CastPPNode(klass_node, akls);
4033       cast->init_req(0, control());
4034       klass_node = _gvn.transform(cast);
4035     }
4036 
4037     // Bail out if either start or end is negative.
4038     generate_negative_guard(start, bailout, &start);
4039     generate_negative_guard(end,   bailout, &end);
4040 
4041     Node* length = end;
4042     if (_gvn.type(start) != TypeInt::ZERO) {
4043       length = _gvn.transform(new SubINode(end, start));
4044     }
4045 
4046     // Bail out if length is negative.
4047     // Without this, new_array would throw
4048     // NegativeArraySizeException, but IllegalArgumentException is what
4049     // should be thrown.
4050     generate_negative_guard(length, bailout, &length);
4051 
































4052     if (bailout->req() > 1) {
4053       PreserveJVMState pjvms(this);
4054       set_control(_gvn.transform(bailout));
4055       uncommon_trap(Deoptimization::Reason_intrinsic,
4056                     Deoptimization::Action_maybe_recompile);
4057     }
4058 
4059     if (!stopped()) {
4060       // How many elements will we copy from the original?
4061       // The answer is MinI(orig_length - start, length).
4062       Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4063       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
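      // Example (illustrative): Arrays.copyOfRange(original, 2, 10) on an
      // 8-element array copies min(8 - 2, 10 - 2) = 6 elements; the remaining
      // slots of the new array keep their default values.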
4064 
4065       // Generate a direct call to the right arraycopy function(s).
4066       // We know the copy is disjoint but we might not know if the
4067       // oop stores need checking.
4068       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
4069       // This will fail a store-check if x contains any non-nulls.
4070 
4071       // ArrayCopyNode::Ideal may transform the ArrayCopyNode to

4074       // to the copyOf to be validated, including that the copy to the
4075       // new array won't trigger an ArrayStoreException. That subtype
4076       // check can be optimized if we know something on the type of
4077       // the input array from type speculation.
4078       if (_gvn.type(klass_node)->singleton()) {
4079         const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4080         const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4081 
4082         int test = C->static_subtype_check(superk, subk);
4083         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4084           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4085           if (t_original->speculative_type() != NULL) {
4086             original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4087           }
4088         }
4089       }
4090 
4091       bool validated = false;
4092       // Reason_class_check rather than Reason_intrinsic because we
4093       // want to intrinsify even if this traps.
4094       if (!too_many_traps(Deoptimization::Reason_class_check)) {
4095         Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4096 
4097         if (not_subtype_ctrl != top()) {
4098           PreserveJVMState pjvms(this);
4099           set_control(not_subtype_ctrl);
4100           uncommon_trap(Deoptimization::Reason_class_check,
4101                         Deoptimization::Action_make_not_entrant);
4102           assert(stopped(), "Should be stopped");
4103         }
4104         validated = true;
4105       }
4106 
4107       if (!stopped()) {
4108         newcopy = new_array(klass_node, length, 0);  // no arguments to push
4109 
4110         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
4111                                                 load_object_klass(original), klass_node);
4112         if (!is_copyOfRange) {
4113           ac->set_copyof(validated);
4114         } else {

4213   set_edges_for_java_call(slow_call);
4214   return slow_call;
4215 }
4216 
4217 
4218 /**
4219  * Build special case code for calls to hashCode on an object. This call may
4220  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4221  * slightly different code.
4222  */
4223 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4224   assert(is_static == callee()->is_static(), "correct intrinsic selection");
4225   assert(!(is_virtual && is_static), "either virtual, special, or static");
4226 
4227   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4228 
4229   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4230   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
4231   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
4232   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4233   Node* obj = NULL;





4234   if (!is_static) {
4235     // Check for hashing null object
4236     obj = null_check_receiver();
4237     if (stopped())  return true;        // unconditionally null
4238     result_reg->init_req(_null_path, top());
4239     result_val->init_req(_null_path, top());
4240   } else {
4241     // Do a null check, and return zero if null.
4242     // System.identityHashCode(null) == 0
4243     obj = argument(0);
4244     Node* null_ctl = top();
4245     obj = null_check_oop(obj, &null_ctl);
4246     result_reg->init_req(_null_path, null_ctl);
4247     result_val->init_req(_null_path, _gvn.intcon(0));
4248   }
4249 
4250   // Unconditionally null?  Then return right away.
4251   if (stopped()) {
4252     set_control( result_reg->in(_null_path));
4253     if (!stopped())
4254       set_result(result_val->in(_null_path));
4255     return true;
4256   }
4257 
4258   // We only go to the fast case code if we pass a number of guards.  The
4259   // paths which do not pass are accumulated in the slow_region.
4260   RegionNode* slow_region = new RegionNode(1);
4261   record_for_igvn(slow_region);
4262 
4263   // If this is a virtual call, we generate a funny guard.  We pull out
4264   // the vtable entry corresponding to hashCode() from the target object.
4265   // If the target method which we are calling happens to be the native
4266   // Object hashCode() method, we pass the guard.  We do not need this
4267   // guard for non-virtual calls -- the caller is known to be the native
4268   // Object hashCode().
4269   if (is_virtual) {
4270     // After null check, get the object's klass.
4271     Node* obj_klass = load_object_klass(obj);
4272     generate_virtual_guard(obj_klass, slow_region);
4273   }
4274 
4275   // Get the header out of the object, use LoadMarkNode when available
4276   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4277   // The control of the load must be NULL. Otherwise, the load can move before
4278   // the null check after castPP removal.
4279   Node* no_ctrl = NULL;
4280   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4281 
4282   // Test the header to see if it is unlocked.
4283   Node *lock_mask      = _gvn.MakeConX(markWord::lock_mask_in_place);

4284   Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4285   Node *unlocked_val   = _gvn.MakeConX(markWord::unlocked_value);
4286   Node *chk_unlocked   = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
4287   Node *test_unlocked  = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
4288 
4289   generate_slow_guard(test_unlocked, slow_region);
4290 
4291   // Get the hash value and check to see that it has been properly assigned.
4292   // We depend on hash_mask being at most 32 bits and avoid the use of
4293   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4294   // vm: see markWord.hpp.
4295   Node *hash_mask      = _gvn.intcon(markWord::hash_mask);
4296   Node *hash_shift     = _gvn.intcon(markWord::hash_shift);
4297   Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
4298   // This hack lets the hash bits live anywhere in the mark object now, as long
4299   // as the shift drops the relevant bits into the low 32 bits.  Note that
4300   // Java spec says that HashCode is an int so there's no point in capturing
4301   // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
4302   hshifted_header      = ConvX2I(hshifted_header);
4303   Node *hash_val       = _gvn.transform(new AndINode(hshifted_header, hash_mask));

4329     // this->control() comes from set_results_for_java_call
4330     result_reg->init_req(_slow_path, control());
4331     result_val->init_req(_slow_path, slow_result);
4332     result_io  ->set_req(_slow_path, i_o());
4333     result_mem ->set_req(_slow_path, reset_memory());
4334   }
4335 
4336   // Return the combined state.
4337   set_i_o(        _gvn.transform(result_io)  );
4338   set_all_memory( _gvn.transform(result_mem));
4339 
4340   set_result(result_reg, result_val);
4341   return true;
4342 }
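The fast path above only reads the mark word: it checks that the object is unlocked and then extracts the installed identity hash. A plain C++ sketch of that bit manipulation; the lock-bit constants follow markWord's layout, while hash_shift/hash_mask are left as parameters because their concrete values are build-dependent:

#include <cstdint>

static const uintptr_t lock_mask_in_place_sketch = 0x3;  // low two mark-word bits
static const uintptr_t unlocked_value_sketch     = 0x1;  // 0b01 == unlocked

static inline bool mark_is_unlocked(uintptr_t mark) {
  return (mark & lock_mask_in_place_sketch) == unlocked_value_sketch;  // otherwise: slow path
}

static inline int mark_identity_hash(uintptr_t mark, int hash_shift, uint32_t hash_mask) {
  // Shift the hash field down and keep at most 32 bits, as ConvX2I/AndI do above.
  return int((mark >> hash_shift) & hash_mask);  // 0 (no_hash) means not yet assigned
}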
4343 
4344 //---------------------------inline_native_getClass----------------------------
4345 // public final native Class<?> java.lang.Object.getClass();
4346 //
4347 // Build special case code for calls to getClass on an object.
4348 bool LibraryCallKit::inline_native_getClass() {
4349   Node* obj = null_check_receiver();









4350   if (stopped())  return true;
4351   set_result(load_mirror_from_klass(load_object_klass(obj)));
4352   return true;
4353 }
4354 
4355 //-----------------inline_native_Reflection_getCallerClass---------------------
4356 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4357 //
4358 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4359 //
4360 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4361 // in that it must skip particular security frames and checks for
4362 // caller sensitive methods.
4363 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4364 #ifndef PRODUCT
4365   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4366     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4367   }
4368 #endif
4369 

4694 //  not cloneable or finalizer => slow path to out-of-line Object.clone
4695 //
4696 // The general case has two steps, allocation and copying.
4697 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
4698 //
4699 // Copying also has two cases, oop arrays and everything else.
4700 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
4701 // Everything else uses the tight inline loop supplied by CopyArrayNode.
4702 //
4703 // These steps fold up nicely if and when the cloned object's klass
4704 // can be sharply typed as an object array, a type array, or an instance.
4705 //
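     // Rough shape of the fast paths generated below (sketch):
     //   if (obj is an array) {
     //     alloc_obj = new_array(...);
     //     oop array needing GC barriers -> _objArray_path (arrayof_oop_arraycopy)
     //     otherwise                     -> _array_path    (copy_to_clone)
     //   } else if (the instance guards pass) {
     //     -> _instance_path (plain instance allocation, plus copy)
     //   } else {
     //     -> _slow_path (out-of-line call to Object.clone)
     //   }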
4706 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
4707   PhiNode* result_val;
4708 
4709   // Set the reexecute bit for the interpreter to reexecute
4710   // the bytecode that invokes Object.clone if deoptimization happens.
4711   { PreserveReexecuteState preexecs(this);
4712     jvms()->set_should_reexecute(true);
4713 
4714     Node* obj = null_check_receiver();

4715     if (stopped())  return true;
4716 
4717     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
4718 
4719     // If we are going to clone an instance, we need its exact type to
4720     // know the number and types of fields to convert the clone to
4721     // loads/stores. Maybe a speculative type can help us.
4722     if (!obj_type->klass_is_exact() &&
4723         obj_type->speculative_type() != NULL &&
4724         obj_type->speculative_type()->is_instance_klass()) {

4725       ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
4726       if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
4727           !spec_ik->has_injected_fields()) {
4728         if (!obj_type->isa_instptr() ||
4729             obj_type->is_instptr()->instance_klass()->has_subklass()) {
4730           obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
4731         }
4732       }
4733     }
4734 
4735     // Conservatively insert a memory barrier on all memory slices.
4736     // Do not let writes into the original float below the clone.
4737     insert_mem_bar(Op_MemBarCPUOrder);
4738 
4739     // paths into result_reg:
4740     enum {
4741       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
4742       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
4743       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
4744       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
4745       PATH_LIMIT
4746     };
4747     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4748     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4749     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
4750     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4751     record_for_igvn(result_reg);
4752 
4753     Node* obj_klass = load_object_klass(obj);





4754     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
4755     if (array_ctl != NULL) {
4756       // It's an array.
4757       PreserveJVMState pjvms(this);
4758       set_control(array_ctl);
4759       Node* obj_length = load_array_length(obj);
4760       Node* obj_size  = NULL;
4761       Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size, /*deoptimize_on_exception=*/true);
4762 
4763       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4764       if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
4765         // If it is an oop array, it requires very special treatment,
4766         // because gc barriers are required when accessing the array.
4767         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
4768         if (is_obja != NULL) {
4769           PreserveJVMState pjvms2(this);
4770           set_control(is_obja);
4771           // Generate a direct call to the right arraycopy function(s).
4772           // Clones are always tightly coupled.
4773           ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
4774           ac->set_clone_oop_array();
4775           Node* n = _gvn.transform(ac);
4776           assert(n == ac, "cannot disappear");
4777           ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
4778 
4779           result_reg->init_req(_objArray_path, control());
4780           result_val->init_req(_objArray_path, alloc_obj);
4781           result_i_o ->set_req(_objArray_path, i_o());
4782           result_mem ->set_req(_objArray_path, reset_memory());
4783         }
4784       }
4785       // Otherwise, there are no barriers to worry about.
4786       // (We can dispense with card marks if we know the allocation
4787       //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
4788       //  causes the non-eden paths to take compensating steps to
4789       //  simulate a fresh allocation, so that no further
4790       //  card marks are required in compiled code to initialize
4791       //  the object.)
4792 
4793       if (!stopped()) {
4794         copy_to_clone(obj, alloc_obj, obj_size, true);
4795 
4796         // Present the results of the copy.
4797         result_reg->init_req(_array_path, control());
4798         result_val->init_req(_array_path, alloc_obj);
4799         result_i_o ->set_req(_array_path, i_o());
4800         result_mem ->set_req(_array_path, reset_memory());




































4801       }
4802     }
4803 
4804     // We only go to the instance fast case code if we pass a number of guards.
4805     // The paths which do not pass are accumulated in the slow_region.
4806     RegionNode* slow_region = new RegionNode(1);
4807     record_for_igvn(slow_region);
4808     if (!stopped()) {
4809       // It's an instance (we did array above).  Make the slow-path tests.
4810       // If this is a virtual call, we generate a funny guard.  We grab
4811       // the vtable entry corresponding to clone() from the target object.
4812       // If the target method which we are calling happens to be the
4813       // Object clone() method, we pass the guard.  We do not need this
4814       // guard for non-virtual calls; the caller is known to be the native
4815       // Object clone().
4816       if (is_virtual) {
4817         generate_virtual_guard(obj_klass, slow_region);
4818       }
4819 
4820       // The object must be easily cloneable and must not have a finalizer.
4821       // Both of these conditions may be checked in a single test.
4822       // We could optimize the test further, but we don't care.
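           // Concretely, the guard below falls through only if
           //   (access_flags & (JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER))
           //       == JVM_ACC_IS_CLONEABLE_FAST,
           // i.e. the klass is fast-cloneable and does not have a finalizer.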
4823       generate_access_flags_guard(obj_klass,
4824                                   // Test both conditions:
4825                                   JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
4826                                   // Must be cloneable but not finalizer:
4827                                   JVM_ACC_IS_CLONEABLE_FAST,

4948 // array in the heap that GCs wouldn't expect. Move the allocation
4949 // after the traps so we don't allocate the array if we
4950 // deoptimize. This is possible because tightly_coupled_allocation()
4951 // guarantees there's no observer of the allocated array at this point
4952 // and the control flow is simple enough.
4953 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms,
4954                                                     int saved_reexecute_sp, uint new_idx) {
4955   if (saved_jvms != NULL && !stopped()) {
4956     assert(alloc != NULL, "only with a tightly coupled allocation");
4957     // restore JVM state to the state at the arraycopy
4958     saved_jvms->map()->set_control(map()->control());
4959     assert(saved_jvms->map()->memory() == map()->memory(), "memory state changed?");
4960     assert(saved_jvms->map()->i_o() == map()->i_o(), "IO state changed?");
4961     // If we've improved the types of some nodes (null check) while
4962     // emitting the guards, propagate them to the current state
4963     map()->replaced_nodes().apply(saved_jvms->map(), new_idx);
4964     set_jvms(saved_jvms);
4965     _reexecute_sp = saved_reexecute_sp;
4966 
4967     // Remove the allocation from above the guards
4968     CallProjections callprojs;
4969     alloc->extract_projections(&callprojs, true);
4970     InitializeNode* init = alloc->initialization();
4971     Node* alloc_mem = alloc->in(TypeFunc::Memory);
4972     C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
4973     C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
4974 
4975     // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
4976     // the allocation (i.e. is only valid if the allocation succeeds):
4977     // 1) replace CastIINode with AllocateArrayNode's length here
4978     // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
4979     //
4980     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
4981     // a new separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy)
4982     Node* init_control = init->proj_out(TypeFunc::Control);
4983     Node* alloc_length = alloc->Ideal_length();
4984 #ifdef ASSERT
4985     Node* prev_cast = NULL;
4986 #endif
4987     for (uint i = 0; i < init_control->outcnt(); i++) {
4988       Node* init_out = init_control->raw_out(i);
4989       if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
4990 #ifdef ASSERT
4991         if (prev_cast == NULL) {
4992           prev_cast = init_out;

4994           if (prev_cast->cmp(*init_out) == false) {
4995             prev_cast->dump();
4996             init_out->dump();
4997             assert(false, "not equal CastIINode");
4998           }
4999         }
5000 #endif
5001         C->gvn_replace_by(init_out, alloc_length);
5002       }
5003     }
5004     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5005 
5006     // move the allocation here (after the guards)
5007     _gvn.hash_delete(alloc);
5008     alloc->set_req(TypeFunc::Control, control());
5009     alloc->set_req(TypeFunc::I_O, i_o());
5010     Node *mem = reset_memory();
5011     set_all_memory(mem);
5012     alloc->set_req(TypeFunc::Memory, mem);
5013     set_control(init->proj_out_or_null(TypeFunc::Control));
5014     set_i_o(callprojs.fallthrough_ioproj);
5015 
5016     // Update memory as done in GraphKit::set_output_for_allocation()
5017     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5018     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5019     if (ary_type->isa_aryptr() && length_type != NULL) {
5020       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5021     }
5022     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5023     int            elemidx  = C->get_alias_index(telemref);
5024     set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
5025     set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
5026 
5027     Node* allocx = _gvn.transform(alloc);
5028     assert(allocx == alloc, "where has the allocation gone?");
5029     assert(dest->is_CheckCastPP(), "not an allocation result?");
5030 
5031     _gvn.hash_delete(dest);
5032     dest->set_req(0, control());
5033     Node* destx = _gvn.transform(dest);
5034     assert(destx == dest, "where has the allocation result gone?");

5143         top_src  = src_type->isa_aryptr();
5144         has_src = (top_src != NULL && top_src->elem() != Type::BOTTOM);
5145         src_spec = true;
5146       }
5147       if (!has_dest) {
5148         dest = maybe_cast_profiled_obj(dest, dest_k, true);
5149         dest_type  = _gvn.type(dest);
5150         top_dest  = dest_type->isa_aryptr();
5151         has_dest = (top_dest != NULL && top_dest->elem() != Type::BOTTOM);
5152         dest_spec = true;
5153       }
5154     }
5155   }
5156 
5157   if (has_src && has_dest && can_emit_guards) {
5158     BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
5159     BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
5160     if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
5161     if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
5162 
5163     if (src_elem == dest_elem && src_elem == T_OBJECT) {
5164       // If both arrays are object arrays then having the exact types
5165       // for both will remove the need for a subtype check at runtime
5166       // before the call and may make it possible to pick a faster copy
5167       // routine (without a subtype check on every element)
5168       // Do we have the exact type of src?
5169       bool could_have_src = src_spec;
5170       // Do we have the exact type of dest?
5171       bool could_have_dest = dest_spec;
5172       ciKlass* src_k = NULL;
5173       ciKlass* dest_k = NULL;
5174       if (!src_spec) {
5175         src_k = src_type->speculative_type_not_null();
5176         if (src_k != NULL && src_k->is_array_klass()) {
5177           could_have_src = true;
5178         }
5179       }
5180       if (!dest_spec) {
5181         dest_k = dest_type->speculative_type_not_null();
5182         if (dest_k != NULL && dest_k->is_array_klass()) {
5183           could_have_dest = true;
5184         }
5185       }
5186       if (could_have_src && could_have_dest) {
5187         // If we can have both exact types, emit the missing guards
5188         if (could_have_src && !src_spec) {
5189           src = maybe_cast_profiled_obj(src, src_k, true);


5190         }
5191         if (could_have_dest && !dest_spec) {
5192           dest = maybe_cast_profiled_obj(dest, dest_k, true);


5193         }
5194       }
5195     }
5196   }
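       // With exact (or speculatively cast) object-array types for both src and
       // dest, macro expansion of the ArrayCopyNode built below may pick a copy
       // routine that skips the per-element subtype check, as explained above.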
5197 
5198   ciMethod* trap_method = method();
5199   int trap_bci = bci();
5200   if (saved_jvms != NULL) {
5201     trap_method = alloc->jvms()->method();
5202     trap_bci = alloc->jvms()->bci();
5203   }
5204 
5205   bool negative_length_guard_generated = false;
5206 
5207   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5208       can_emit_guards &&
5209       !src->is_top() && !dest->is_top()) {
5210     // validate arguments: enables transformation of the ArrayCopyNode
5211     validated = true;
5212 
5213     RegionNode* slow_region = new RegionNode(1);
5214     record_for_igvn(slow_region);
5215 
5216     // (1) src and dest are arrays.
5217     generate_non_array_guard(load_object_klass(src), slow_region);
5218     generate_non_array_guard(load_object_klass(dest), slow_region);
5219 
5220     // (2) src and dest arrays must have elements of the same BasicType
5221     // done at macro expansion or at Ideal transformation time
5222 
5223     // (4) src_offset must not be negative.
5224     generate_negative_guard(src_offset, slow_region);
5225 
5226     // (5) dest_offset must not be negative.
5227     generate_negative_guard(dest_offset, slow_region);
5228 
5229     // (7) src_offset + length must not exceed length of src.

5232                          slow_region);
5233 
5234     // (8) dest_offset + length must not exceed length of dest.
5235     generate_limit_guard(dest_offset, length,
5236                          load_array_length(dest),
5237                          slow_region);
5238 
5239     // (6) length must not be negative.
5240     // This is also checked in generate_arraycopy() during macro expansion, but
5241     // we also have to check it here for the case where the ArrayCopyNode will
5242     // be eliminated by Escape Analysis.
5243     if (EliminateAllocations) {
5244       generate_negative_guard(length, slow_region);
5245       negative_length_guard_generated = true;
5246     }
5247 
5248     // (9) each element of an oop array must be assignable
5249     Node* dest_klass = load_object_klass(dest);
5250     if (src != dest) {
5251       Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);


5252 
5253       if (not_subtype_ctrl != top()) {
5254         PreserveJVMState pjvms(this);
5255         set_control(not_subtype_ctrl);
5256         uncommon_trap(Deoptimization::Reason_intrinsic,
5257                       Deoptimization::Action_make_not_entrant);
5258         assert(stopped(), "Should be stopped");






















5259       }
5260     }

5261     {
5262       PreserveJVMState pjvms(this);
5263       set_control(_gvn.transform(slow_region));
5264       uncommon_trap(Deoptimization::Reason_intrinsic,
5265                     Deoptimization::Action_make_not_entrant);
5266       assert(stopped(), "Should be stopped");
5267     }
5268 
5269     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
5270     const Type *toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
5271     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
5272   }
5273 
5274   arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp, new_idx);
5275 
5276   if (stopped()) {
5277     return true;
5278   }
5279 
5280   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != NULL, negative_length_guard_generated,
5281                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
5282                                           // so the compiler has a chance to eliminate them: during macro expansion,
5283                                           // we have to set their control (CastPP nodes are eliminated).
5284                                           load_object_klass(src), load_object_klass(dest),
5285                                           load_array_length(src), load_array_length(dest));
5286 
5287   ac->set_arraycopy(validated);
5288 
5289   Node* n = _gvn.transform(ac);
5290   if (n == ac) {
5291     ac->connect_outputs(this);

  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "ci/ciFlatArrayKlass.hpp"
  28 #include "ci/ciUtilities.inline.hpp"
  29 #include "classfile/vmIntrinsics.hpp"
  30 #include "compiler/compileBroker.hpp"
  31 #include "compiler/compileLog.hpp"
  32 #include "gc/shared/barrierSet.hpp"
  33 #include "jfr/support/jfrIntrinsics.hpp"
  34 #include "memory/resourceArea.hpp"
  35 #include "oops/klass.inline.hpp"
  36 #include "oops/objArrayKlass.hpp"
  37 #include "opto/addnode.hpp"
  38 #include "opto/arraycopynode.hpp"
  39 #include "opto/c2compiler.hpp"
  40 #include "opto/castnode.hpp"
  41 #include "opto/cfgnode.hpp"
  42 #include "opto/convertnode.hpp"
  43 #include "opto/countbitsnode.hpp"
  44 #include "opto/idealKit.hpp"
  45 #include "opto/library_call.hpp"
  46 #include "opto/mathexactnode.hpp"
  47 #include "opto/mulnode.hpp"

 301   case vmIntrinsics::_indexOfUL:                return inline_string_indexOf(StrIntrinsicNode::UL);
 302   case vmIntrinsics::_indexOfIL:                return inline_string_indexOfI(StrIntrinsicNode::LL);
 303   case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
 304   case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
 305   case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar(StrIntrinsicNode::U);
 306   case vmIntrinsics::_indexOfL_char:            return inline_string_indexOfChar(StrIntrinsicNode::L);
 307 
 308   case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);
 309   case vmIntrinsics::_equalsU:                  return inline_string_equals(StrIntrinsicNode::UU);
 310 
 311   case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
 312   case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
 313   case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
 314   case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);
 315 
 316   case vmIntrinsics::_compressStringC:
 317   case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
 318   case vmIntrinsics::_inflateStringC:
 319   case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
 320 
 321   case vmIntrinsics::_makePrivateBuffer:        return inline_unsafe_make_private_buffer();
 322   case vmIntrinsics::_finishPrivateBuffer:      return inline_unsafe_finish_private_buffer();
 323   case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false);
 324   case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN,  Relaxed, false);
 325   case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,     Relaxed, false);
 326   case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,    Relaxed, false);
 327   case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,     Relaxed, false);
 328   case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,      Relaxed, false);
 329   case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,     Relaxed, false);
 330   case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,    Relaxed, false);
 331   case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,   Relaxed, false);
 332   case vmIntrinsics::_getValue:                 return inline_unsafe_access(!is_store, T_PRIMITIVE_OBJECT,Relaxed, false);
 333 
 334   case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false);
 335   case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN,  Relaxed, false);
 336   case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,     Relaxed, false);
 337   case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,    Relaxed, false);
 338   case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,     Relaxed, false);
 339   case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,      Relaxed, false);
 340   case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,     Relaxed, false);
 341   case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,    Relaxed, false);
 342   case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,   Relaxed, false);
 343   case vmIntrinsics::_putValue:                 return inline_unsafe_access( is_store, T_PRIMITIVE_OBJECT,Relaxed, false);
 344 
 345   case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,   Volatile, false);
 346   case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN,  Volatile, false);
 347   case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,     Volatile, false);
 348   case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,    Volatile, false);
 349   case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,     Volatile, false);
 350   case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,      Volatile, false);
 351   case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,     Volatile, false);
 352   case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,    Volatile, false);
 353   case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,   Volatile, false);
 354 
 355   case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,   Volatile, false);
 356   case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN,  Volatile, false);
 357   case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,     Volatile, false);
 358   case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,    Volatile, false);
 359   case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,     Volatile, false);
 360   case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,      Volatile, false);
 361   case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,     Volatile, false);
 362   case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,    Volatile, false);
 363   case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,   Volatile, false);

 497   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 498   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 499   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 500   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 501   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 502 
 503   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 504   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);
 505 
 506   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
 507 
 508   case vmIntrinsics::_isInstance:
 509   case vmIntrinsics::_getModifiers:
 510   case vmIntrinsics::_isInterface:
 511   case vmIntrinsics::_isArray:
 512   case vmIntrinsics::_isPrimitive:
 513   case vmIntrinsics::_isHidden:
 514   case vmIntrinsics::_getSuperclass:
 515   case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());
 516 
 517   case vmIntrinsics::_asPrimaryType:
 518   case vmIntrinsics::_asPrimaryTypeArg:
 519   case vmIntrinsics::_asValueType:
 520   case vmIntrinsics::_asValueTypeArg:           return inline_primitive_Class_conversion(intrinsic_id());
 521 
 522   case vmIntrinsics::_floatToRawIntBits:
 523   case vmIntrinsics::_floatToIntBits:
 524   case vmIntrinsics::_intBitsToFloat:
 525   case vmIntrinsics::_doubleToRawLongBits:
 526   case vmIntrinsics::_doubleToLongBits:
 527   case vmIntrinsics::_longBitsToDouble:
 528   case vmIntrinsics::_floatToFloat16:
 529   case vmIntrinsics::_float16ToFloat:           return inline_fp_conversions(intrinsic_id());
 530 
 531   case vmIntrinsics::_floatIsFinite:
 532   case vmIntrinsics::_floatIsInfinite:
 533   case vmIntrinsics::_doubleIsFinite:
 534   case vmIntrinsics::_doubleIsInfinite:         return inline_fp_range_check(intrinsic_id());
 535 
 536   case vmIntrinsics::_numberOfLeadingZeros_i:
 537   case vmIntrinsics::_numberOfLeadingZeros_l:
 538   case vmIntrinsics::_numberOfTrailingZeros_i:
 539   case vmIntrinsics::_numberOfTrailingZeros_l:
 540   case vmIntrinsics::_bitCount_i:
 541   case vmIntrinsics::_bitCount_l:

1982     return Type::AnyPtr;
1983   } else if (base_type == TypePtr::NULL_PTR) {
1984     // Since this is a NULL+long form, we have to switch to a rawptr.
1985     base   = _gvn.transform(new CastX2PNode(offset));
1986     offset = MakeConX(0);
1987     return Type::RawPtr;
1988   } else if (base_type->base() == Type::RawPtr) {
1989     return Type::RawPtr;
1990   } else if (base_type->isa_oopptr()) {
1991     // Base is never null => always a heap address.
1992     if (!TypePtr::NULL_PTR->higher_equal(base_type)) {
1993       return Type::OopPtr;
1994     }
1995     // Offset is small => always a heap address.
1996     const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
1997     if (offset_type != NULL &&
1998         base_type->offset() == 0 &&     // (should always be?)
1999         offset_type->_lo >= 0 &&
2000         !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
2001       return Type::OopPtr;
2002     } else if (type == T_OBJECT || type == T_PRIMITIVE_OBJECT) {
2003       // An off-heap access to an oop doesn't make any sense; it has to be
2004       // on-heap.
2005       return Type::OopPtr;
2006     }
2007     // Otherwise, it might either be oop+off or NULL+addr.
2008     return Type::AnyPtr;
2009   } else {
2010     // No information:
2011     return Type::AnyPtr;
2012   }
2013 }
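     // Summary of the classification above (sketch):
     //   NULL + long offset -> Type::RawPtr (the offset is reinterpreted as a raw address)
     //   raw-pointer base   -> Type::RawPtr
     //   oop base           -> Type::OopPtr if the base is never null, the offset is small
     //                         and non-negative, or the access is oop-typed; Type::AnyPtr otherwise
     //   no information     -> Type::AnyPtr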
2014 
2015 Node* LibraryCallKit::make_unsafe_address(Node*& base, Node* offset, BasicType type, bool can_cast) {
2016   Node* uncasted_base = base;
2017   int kind = classify_unsafe_addr(uncasted_base, offset, type);
2018   if (kind == Type::RawPtr) {
2019     return basic_plus_adr(top(), uncasted_base, offset);
2020   } else if (kind == Type::AnyPtr) {
2021     assert(base == uncasted_base, "unexpected base change");
2022     if (can_cast) {

2161     case vmIntrinsics::_remainderUnsigned_l: {
2162       zero_check_long(argument(2));
2163       // Compile-time detection of a zero divisor (exception path already taken)
2164       if (stopped()) {
2165         return true; // keep the graph constructed so far
2166       }
2167       n = new UModLNode(control(), argument(0), argument(2));
2168       break;
2169     }
2170     default:  fatal_unexpected_iid(id);  break;
2171   }
2172   set_result(_gvn.transform(n));
2173   return true;
2174 }
2175 
2176 //----------------------------inline_unsafe_access----------------------------
2177 
2178 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2179   // Attempt to infer a sharper value type from the offset and base type.
2180   ciKlass* sharpened_klass = NULL;
2181   bool null_free = false;
2182 
2183   // See if it is an instance field, with an object type.
2184   if (alias_type->field() != NULL) {
2185     if (alias_type->field()->type()->is_klass()) {
2186       sharpened_klass = alias_type->field()->type()->as_klass();
2187       null_free = alias_type->field()->is_null_free();
2188     }
2189   }
2190 
2191   const TypeOopPtr* result = NULL;
2192   // See if it is a narrow oop array.
2193   if (adr_type->isa_aryptr()) {
2194     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2195       const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2196       null_free = adr_type->is_aryptr()->is_null_free();
2197       if (elem_type != NULL && elem_type->is_loaded()) {
2198         // Sharpen the value type.
2199         result = elem_type;
2200       }
2201     }
2202   }
2203 
2204   // The sharpened class might be unloaded if there is no class loader
2205     // constraint in place.
2206   if (result == NULL && sharpened_klass != NULL && sharpened_klass->is_loaded()) {
2207     // Sharpen the value type.
2208     result = TypeOopPtr::make_from_klass(sharpened_klass);
2209     if (null_free) {
2210       result = result->join_speculative(TypePtr::NOTNULL)->is_oopptr();
2211     }
2212   }
2213   if (result != NULL) {
2214 #ifndef PRODUCT
2215     if (C->print_intrinsics() || C->print_inlining()) {
2216       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2217       tty->print("  sharpened value: ");  result->dump();    tty->cr();
2218     }
2219 #endif
2220   }
2221   return result;
2222 }
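     // For example (hypothetical case): an unsafe getReference at the offset of an
     // instance field declared with type String is sharpened here from Object to
     // String, with NOTNULL joined in when the field is known to be null-free.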
2223 
2224 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2225   switch (kind) {
2226       case Relaxed:
2227         return MO_UNORDERED;
2228       case Opaque:
2229         return MO_RELAXED;
2230       case Acquire:
2231         return MO_ACQUIRE;

2246   guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2247   assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2248 
2249   if (is_reference_type(type)) {
2250     decorators |= ON_UNKNOWN_OOP_REF;
2251   }
2252 
2253   if (unaligned) {
2254     decorators |= C2_UNALIGNED;
2255   }
2256 
2257 #ifndef PRODUCT
2258   {
2259     ResourceMark rm;
2260     // Check the signatures.
2261     ciSignature* sig = callee()->signature();
2262 #ifdef ASSERT
2263     if (!is_store) {
2264       // Object getReference(Object base, int/long offset), etc.
2265       BasicType rtype = sig->return_type()->basic_type();
2266       assert(rtype == type || (rtype == T_OBJECT && type == T_PRIMITIVE_OBJECT), "getter must return the expected value");
2267       assert(sig->count() == 2 || (type == T_PRIMITIVE_OBJECT && sig->count() == 3), "oop getter has 2 or 3 arguments");
2268       assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2269       assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2270     } else {
2271       // void putReference(Object base, int/long offset, Object x), etc.
2272       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2273       assert(sig->count() == 3 || (type == T_PRIMITIVE_OBJECT && sig->count() == 4), "oop putter has 3 arguments");
2274       assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2275       assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2276       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2277       assert(vtype == type || (type == T_PRIMITIVE_OBJECT && vtype == T_OBJECT), "putter must accept the expected value");
2278     }
2279 #endif // ASSERT
2280  }
2281 #endif //PRODUCT
2282 
2283   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2284 
2285   Node* receiver = argument(0);  // type: oop
2286 
2287   // Build address expression.
2288   Node* heap_base_oop = top();
2289 
2290   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2291   Node* base = argument(1);  // type: oop
2292   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2293   Node* offset = argument(2);  // type: long
2294   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2295   // to be plain byte offsets, which are also the same as those accepted
2296   // by oopDesc::field_addr.
2297   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2298          "fieldOffset must be byte-scaled");
2299 
2300   ciInlineKlass* inline_klass = NULL;
2301   if (type == T_PRIMITIVE_OBJECT) {
2302     const TypeInstPtr* cls = _gvn.type(argument(4))->isa_instptr();
2303     if (cls == NULL || cls->const_oop() == NULL) {
2304       return false;
2305     }
2306     ciType* mirror_type = cls->const_oop()->as_instance()->java_mirror_type();
2307     if (!mirror_type->is_inlinetype()) {
2308       return false;
2309     }
2310     inline_klass = mirror_type->as_inline_klass();
2311   }
2312 
2313   if (base->is_InlineType()) {
2314     InlineTypeNode* vt = base->as_InlineType();
2315     if (is_store) {
2316       if (!vt->is_allocated(&_gvn)) {
2317         return false;
2318       }
2319       base = vt->get_oop();
2320     } else {
2321       if (offset->is_Con()) {
2322         long off = find_long_con(offset, 0);
2323         ciInlineKlass* vk = vt->type()->inline_klass();
2324         if ((long)(int)off != off || !vk->contains_field_offset(off)) {
2325           return false;
2326         }
2327 
2328         ciField* field = vk->get_non_flattened_field_by_offset(off);
2329         if (field != NULL) {
2330           BasicType bt = field->layout_type();
2331           if (bt == T_ARRAY || bt == T_NARROWOOP || (bt == T_PRIMITIVE_OBJECT && !field->is_flattened())) {
2332             bt = T_OBJECT;
2333           }
2334           if (bt == type && (bt != T_PRIMITIVE_OBJECT || field->type() == inline_klass)) {
2335             Node* value = vt->field_value_by_offset(off, false);
2336             if (value->is_InlineType()) {
2337               value = value->as_InlineType()->adjust_scalarization_depth(this);
2338             }
2339             set_result(value);
2340             return true;
2341           }
2342         }
2343       }
2344       {
2345         // Re-execute the unsafe access if allocation triggers deoptimization.
2346         PreserveReexecuteState preexecs(this);
2347         jvms()->set_should_reexecute(true);
2348         vt = vt->buffer(this);
2349       }
2350       base = vt->get_oop();
2351     }
2352   }
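       // If 'base' was a scalarized inline type, it has been replaced above by its
       // (already allocated or freshly buffered) heap oop; otherwise it is still the
       // original base argument.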
2353 
2354   // 32-bit machines ignore the high half!
2355   offset = ConvL2X(offset);
2356 
2357   // Save state and restore on bailout
2358   uint old_sp = sp();
2359   SafePointNode* old_map = clone_map();
2360 
2361   Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2362 
2363   if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
2364     if (type != T_OBJECT && (inline_klass == NULL || !inline_klass->has_object_fields())) {
2365       decorators |= IN_NATIVE; // off-heap primitive access
2366     } else {
2367       set_map(old_map);
2368       set_sp(old_sp);
2369       return false; // off-heap oop accesses are not supported
2370     }
2371   } else {
2372     heap_base_oop = base; // on-heap or mixed access
2373   }
2374 
2375   // Can base be NULL? Otherwise, always on-heap access.
2376   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2377 
2378   if (!can_access_non_heap) {
2379     decorators |= IN_HEAP;
2380   }
2381 
2382   Node* val = is_store ? argument(4 + (type == T_PRIMITIVE_OBJECT ? 1 : 0)) : NULL;
2383 
2384   const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2385   if (adr_type == TypePtr::NULL_PTR) {
2386     set_map(old_map);
2387     set_sp(old_sp);
2388     return false; // off-heap access with zero address
2389   }
2390 
2391   // Try to categorize the address.
2392   Compile::AliasType* alias_type = C->alias_type(adr_type);
2393   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2394 
2395   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2396       alias_type->adr_type() == TypeAryPtr::RANGE) {
2397     set_map(old_map);
2398     set_sp(old_sp);
2399     return false; // not supported
2400   }
2401 
2402   bool mismatched = false;
2403   BasicType bt = T_ILLEGAL;
2404   ciField* field = NULL;
2405   if (adr_type->isa_instptr()) {
2406     const TypeInstPtr* instptr = adr_type->is_instptr();
2407     ciInstanceKlass* k = instptr->instance_klass();
2408     int off = instptr->offset();
2409     if (instptr->const_oop() != NULL &&
2410         k == ciEnv::current()->Class_klass() &&
2411         instptr->offset() >= (k->size_helper() * wordSize)) {
2412       k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
2413       field = k->get_field_by_offset(off, true);
2414     } else {
2415       field = k->get_non_flattened_field_by_offset(off);
2416     }
2417     if (field != NULL) {
2418       bt = field->layout_type();
2419     }
2420     assert(bt == alias_type->basic_type() || bt == T_PRIMITIVE_OBJECT, "should match");
2421     if (field != NULL && bt == T_PRIMITIVE_OBJECT && !field->is_flattened()) {
2422       bt = T_OBJECT;
2423     }
2424   } else {
2425     bt = alias_type->basic_type();
2426   }
2427 
2428   if (bt != T_ILLEGAL) {
2429     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2430     if (adr_type->is_flat()) {
2431       bt = T_PRIMITIVE_OBJECT;
2432     }
2433     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2434       // Alias type doesn't differentiate between byte[] and boolean[].
2435       // Use address type to get the element type.
2436       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2437     }
2438     if (bt != T_PRIMITIVE_OBJECT && is_reference_type(bt, true)) {
2439       // accessing an array element with getReference is not a mismatch
2440       bt = T_OBJECT;
2441     }
2442     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2443       // Don't intrinsify mismatched object accesses
2444       set_map(old_map);
2445       set_sp(old_sp);
2446       return false;
2447     }
2448     mismatched = (bt != type);
2449   } else if (alias_type->adr_type()->isa_oopptr()) {
2450     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2451   }
2452 
2453   if (type == T_PRIMITIVE_OBJECT) {
2454     if (adr_type->isa_instptr()) {
2455       if (field == NULL || field->type() != inline_klass) {
2456         mismatched = true;
2457       }
2458     } else if (adr_type->isa_aryptr()) {
2459       const Type* elem = adr_type->is_aryptr()->elem();
2460       if (!adr_type->is_flat() || elem->inline_klass() != inline_klass) {
2461         mismatched = true;
2462       }
2463     } else {
2464       mismatched = true;
2465     }
2466     if (is_store) {
2467       const Type* val_t = _gvn.type(val);
2468       if (!val_t->is_inlinetypeptr() || val_t->inline_klass() != inline_klass) {
2469         set_map(old_map);
2470         set_sp(old_sp);
2471         return false;
2472       }
2473     }
2474   }
2475 
2476   old_map->destruct(&_gvn);
2477   assert(!mismatched || type == T_PRIMITIVE_OBJECT || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2478 
2479   if (mismatched) {
2480     decorators |= C2_MISMATCHED;
2481   }
2482 
2483   // First guess at the value type.
2484   const Type *value_type = Type::get_const_basic_type(type);
2485 
2486   // Figure out the memory ordering.
2487   decorators |= mo_decorator_for_access_kind(kind);
2488 
2489   if (!is_store) {
2490     if (type == T_OBJECT) {
2491       const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2492       if (tjp != NULL) {
2493         value_type = tjp;
2494       }
2495     } else if (type == T_PRIMITIVE_OBJECT) {
2496       value_type = NULL;
2497     }
2498   }
2499 
2500   receiver = null_check(receiver);
2501   if (stopped()) {
2502     return true;
2503   }
2504   // Heap pointers get a null-check from the interpreter,
2505   // as a courtesy.  However, this is not guaranteed by Unsafe,
2506   // and it is not possible to fully distinguish unintended nulls
2507   // from intended ones in this API.
2508 
2509   if (!is_store) {
2510     Node* p = NULL;
2511     // Try to constant fold a load from a constant field
2512 
2513     if (heap_base_oop != top() && field != NULL && field->is_constant() && !field->is_flattened() && !mismatched) {
2514       // final or stable field
2515       p = make_constant_from_field(field, heap_base_oop);
2516     }
2517 
2518     if (p == NULL) { // Could not constant fold the load
2519       if (type == T_PRIMITIVE_OBJECT) {
2520         if (adr_type->isa_instptr() && !mismatched) {
2521           ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
2522           int offset = adr_type->is_instptr()->offset();
2523           p = InlineTypeNode::make_from_flattened(this, inline_klass, base, base, holder, offset, decorators);
2524         } else {
2525           p = InlineTypeNode::make_from_flattened(this, inline_klass, base, adr, NULL, 0, decorators);
2526         }
2527       } else {
2528         p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2529         const TypeOopPtr* ptr = value_type->make_oopptr();
2530         if (ptr != NULL && ptr->is_inlinetypeptr()) {
2531           // Load a non-flattened inline type from memory
2532           p = InlineTypeNode::make_from_oop(this, p, ptr->inline_klass(), !ptr->maybe_null());
2533         }
2534       }
2535       // Normalize the value returned by getBoolean in the following cases
2536       if (type == T_BOOLEAN &&
2537           (mismatched ||
2538            heap_base_oop == top() ||                  // - heap_base_oop is NULL or
2539            (can_access_non_heap && field == NULL))    // - heap_base_oop is potentially NULL
2540                                                       //   and the unsafe access is made at a large offset
2541                                                       //   (i.e., larger than the maximum offset necessary for any
2542                                                       //   field access)
2543             ) {
2544           IdealKit ideal = IdealKit(this);
2545 #define __ ideal.
2546           IdealVariable normalized_result(ideal);
2547           __ declarations_done();
2548           __ set(normalized_result, p);
2549           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2550           __ set(normalized_result, ideal.ConI(1));
2551           ideal.end_if();
2552           final_sync(ideal);
2553           p = __ value(normalized_result);
2554 #undef __
2555       }
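           // The IdealKit graph built above amounts to: p = (p != 0) ? 1 : 0,
           // canonicalizing whatever byte was loaded into a valid Java boolean.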
2556     }
2557     if (type == T_ADDRESS) {
2558       p = gvn().transform(new CastP2XNode(NULL, p));
2559       p = ConvX2UL(p);
2560     }
2561     // The load node has the control of the preceding MemBarCPUOrder.  All
2562     // following nodes will have the control of the MemBarCPUOrder inserted at
2563     // the end of this method.  So, pushing the load onto the stack at a later
2564     // point is fine.
2565     set_result(p);
2566   } else {
2567     if (bt == T_ADDRESS) {
2568       // Repackage the long as a pointer.
2569       val = ConvL2X(val);
2570       val = gvn().transform(new CastX2PNode(val));
2571     }
2572     if (type == T_PRIMITIVE_OBJECT) {
2573       if (adr_type->isa_instptr() && !mismatched) {
2574         ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
2575         int offset = adr_type->is_instptr()->offset();
2576         val->as_InlineType()->store_flattened(this, base, base, holder, offset, decorators);
2577       } else {
2578         val->as_InlineType()->store_flattened(this, base, adr, NULL, 0, decorators);
2579       }
2580     } else {
2581       access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2582     }
2583   }
2584 
2585   if (argument(1)->is_InlineType() && is_store) {
2586     InlineTypeNode* value = InlineTypeNode::make_from_oop(this, base, _gvn.type(argument(1))->inline_klass());
2587     value = value->make_larval(this, false);
2588     replace_in_map(argument(1), value);
2589   }
2590 
2591   return true;
2592 }
2593 
2594 bool LibraryCallKit::inline_unsafe_make_private_buffer() {
2595   Node* receiver = argument(0);
2596   Node* value = argument(1);
2597   if (!value->is_InlineType()) {
2598     return false;
2599   }
2600 
2601   receiver = null_check(receiver);
2602   if (stopped()) {
2603     return true;
2604   }
2605 
2606   set_result(value->as_InlineType()->make_larval(this, true));
2607   return true;
2608 }
2609 
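     // Counterpart to inline_unsafe_make_private_buffer() above: once the caller has
     // written the buffer's fields through Unsafe, finish_larval() is intended to end
     // the larval phase so the value can be used as an ordinary inline object.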
2610 bool LibraryCallKit::inline_unsafe_finish_private_buffer() {
2611   Node* receiver = argument(0);
2612   Node* buffer = argument(1);
2613   if (!buffer->is_InlineType()) {
2614     return false;
2615   }
2616   InlineTypeNode* vt = buffer->as_InlineType();
2617   if (!vt->is_allocated(&_gvn)) {
2618     return false;
2619   }
2620   // TODO 8239003 Why is this needed?
2621   if (AllocateNode::Ideal_allocation(vt->get_oop(), &_gvn) == NULL) {
2622     return false;
2623   }
2624 
2625   receiver = null_check(receiver);
2626   if (stopped()) {
2627     return true;
2628   }
2629 
2630   set_result(vt->finish_larval(this));
2631   return true;
2632 }
2633 
2634 //----------------------------inline_unsafe_load_store----------------------------
2635 // This method serves a couple of different customers (depending on LoadStoreKind):
2636 //
2637 // LS_cmp_swap:
2638 //
2639 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2640 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2641 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2642 //
2643 // LS_cmp_swap_weak:
2644 //
2645 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2646 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2647 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2648 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2649 //
2650 //   boolean weakCompareAndSetInt(          Object o, long offset, int    expected, int    x);
2651 //   boolean weakCompareAndSetIntPlain(     Object o, long offset, int    expected, int    x);
2652 //   boolean weakCompareAndSetIntAcquire(   Object o, long offset, int    expected, int    x);
2653 //   boolean weakCompareAndSetIntRelease(   Object o, long offset, int    expected, int    x);

2819     }
2820     case LS_cmp_swap:
2821     case LS_cmp_swap_weak:
2822     case LS_get_add:
2823       break;
2824     default:
2825       ShouldNotReachHere();
2826   }
2827 
2828   // Null check receiver.
2829   receiver = null_check(receiver);
2830   if (stopped()) {
2831     return true;
2832   }
2833 
2834   int alias_idx = C->get_alias_index(adr_type);
2835 
2836   if (is_reference_type(type)) {
2837     decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2838 
2839     if (oldval != NULL && oldval->is_InlineType()) {
2840       // Re-execute the unsafe access if allocation triggers deoptimization.
2841       PreserveReexecuteState preexecs(this);
2842       jvms()->set_should_reexecute(true);
2843       oldval = oldval->as_InlineType()->buffer(this)->get_oop();
2844     }
2845     if (newval != NULL && newval->is_InlineType()) {
2846       // Re-execute the unsafe access if allocation triggers deoptimization.
2847       PreserveReexecuteState preexecs(this);
2848       jvms()->set_should_reexecute(true);
2849       newval = newval->as_InlineType()->buffer(this)->get_oop();
2850     }
2851 
2852     // Transformation of a value which could be NULL pointer (CastPP #NULL)
2853     // could be delayed during Parse (for example, in adjust_map_after_if()).
2854     // Execute transformation here to avoid barrier generation in such case.
2855     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2856       newval = _gvn.makecon(TypePtr::NULL_PTR);
2857 
2858     if (oldval != NULL && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2859       // Refine the value to a null constant, when it is known to be null
2860       oldval = _gvn.makecon(TypePtr::NULL_PTR);
2861     }
2862   }
2863 
2864   Node* result = NULL;
2865   switch (kind) {
2866     case LS_cmp_exchange: {
2867       result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2868                                             oldval, newval, value_type, type, decorators);
2869       break;
2870     }
2871     case LS_cmp_swap_weak:

2993   Node* cls = null_check(argument(1));
2994   if (stopped())  return true;
2995 
2996   Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
2997   kls = null_check(kls);
2998   if (stopped())  return true;  // argument was like int.class
2999 
3000   Node* test = NULL;
3001   if (LibraryCallKit::klass_needs_init_guard(kls)) {
3002     // Note:  The argument might still be an illegal value like
3003     // Serializable.class or Object[].class.   The runtime will handle it.
3004     // But we must make an explicit check for initialization.
3005     Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
3006     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
3007     // can generate code to load it as an unsigned byte.
3008     Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
3009     Node* bits = intcon(InstanceKlass::fully_initialized);
3010     test = _gvn.transform(new SubINode(inst, bits));
3011     // The 'test' is non-zero if we need to take a slow path.
3012   }
3013   Node* obj = NULL;
3014   const TypeInstKlassPtr* tkls = _gvn.type(kls)->isa_instklassptr();
3015   if (tkls != NULL && tkls->instance_klass()->is_inlinetype()) {
3016     obj = InlineTypeNode::make_default(_gvn, tkls->instance_klass()->as_inline_klass())->buffer(this);
3017   } else {
3018     obj = new_instance(kls, test);
3019   }
3020   set_result(obj);
3021   return true;
3022 }
3023 
3024 //------------------------inline_native_time_funcs--------------
3025 // inline code for System.currentTimeMillis() and System.nanoTime()
3026 // these have the same type and signature
3027 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3028   const TypeFunc* tf = OptoRuntime::void_long_Type();
3029   const TypePtr* no_memory_effects = NULL;
3030   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3031   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
3032 #ifdef ASSERT
3033   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
3034   assert(value_top == top(), "second value must be top");
3035 #endif
3036   set_result(value);
3037   return true;
3038 }
3039 

3579 
3580   Node* thread = _gvn.transform(new ThreadLocalNode());
3581   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
3582   // We cannot use immutable_memory() because we might flip onto a
3583   // different carrier thread, at which point we'll need to use that
3584   // carrier thread's cache.
3585   // return _gvn.transform(LoadNode::make(_gvn, NULL, immutable_memory(), p, p->bottom_type()->is_ptr(),
3586   //       TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3587   return make_load(NULL, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
3588 }
3589 
3590 //------------------------inline_native_scopedValueCache------------------
3591 bool LibraryCallKit::inline_native_scopedValueCache() {
3592   ciKlass *objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3593   const TypeOopPtr *etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3594   const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
3595 
3596   // Because we create the scopedValue cache lazily we have to make the
3597   // type of the result BotPTR.
3598   bool xk = etype->klass_is_exact();
3599   const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, TypeAryPtr::Offset(0));
3600   Node* cache_obj_handle = scopedValueCache_helper();
3601   set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3602 
3603   return true;
3604 }
3605 
3606 //------------------------inline_native_setScopedValueCache------------------
3607 bool LibraryCallKit::inline_native_setScopedValueCache() {
3608   Node* arr = argument(0);
3609   Node* cache_obj_handle = scopedValueCache_helper();
3610 
3611   const TypePtr *adr_type = _gvn.type(cache_obj_handle)->isa_ptr();
3612   store_to_memory(control(), cache_obj_handle, arr, T_OBJECT, adr_type,
3613                   MemNode::unordered);
3614 
3615   return true;
3616 }
3617 









3618 //-----------------------load_klass_from_mirror_common-------------------------
3619 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3620 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3621 // and branch to the given path on the region.
3622 // If never_see_null, take an uncommon trap on null, so we can optimistically
3623 // compile for the non-null case.
3624 // If the region is NULL, force never_see_null = true.
3625 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3626                                                     bool never_see_null,
3627                                                     RegionNode* region,
3628                                                     int null_path,
3629                                                     int offset) {
3630   if (region == NULL)  never_see_null = true;
3631   Node* p = basic_plus_adr(mirror, offset);
3632   const TypeKlassPtr*  kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3633   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3634   Node* null_ctl = top();
3635   kls = null_check_oop(kls, &null_ctl, never_see_null);
3636   if (region != NULL) {
3637     // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).

3640     assert(null_ctl == top(), "no loose ends");
3641   }
3642   return kls;
3643 }
3644 
3645 //--------------------(inline_native_Class_query helpers)---------------------
3646 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
3647 // Fall through if (mods & mask) == bits, take the guard otherwise.
3648 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3649   // Branch around if the given klass has the given modifier bit set.
3650   // Like generate_guard, adds a new path onto the region.
3651   Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3652   Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered);
3653   Node* mask = intcon(modifier_mask);
3654   Node* bits = intcon(modifier_bits);
3655   Node* mbit = _gvn.transform(new AndINode(mods, mask));
3656   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
3657   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3658   return generate_fair_guard(bol, region);
3659 }
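     // Scalar equivalent of the guard built above (a sketch): the newly added
     // region path is taken when
     //
     //   (access_flags & modifier_mask) != modifier_bits
     //
     // and control falls through when the masked bits compare equal.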
3660 
3661 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3662   return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3663 }
3664 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3665   return generate_access_flags_guard(kls, JVM_ACC_IS_HIDDEN_CLASS, 0, region);
3666 }
3667 
3668 //-------------------------inline_native_Class_query-------------------
3669 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3670   const Type* return_type = TypeInt::BOOL;
3671   Node* prim_return_value = top();  // what happens if it's a primitive class?
3672   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3673   bool expect_prim = false;     // most of these queries expect to work on refs
3674 
3675   enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3676 
3677   Node* mirror = argument(0);
3678   Node* obj    = top();
3679 
3680   switch (id) {

3834 
3835   case vmIntrinsics::_getClassAccessFlags:
3836     p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3837     query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
3838     break;
3839 
3840   default:
3841     fatal_unexpected_iid(id);
3842     break;
3843   }
3844 
3845   // Fall-through is the normal case of a query to a real class.
3846   phi->init_req(1, query_value);
3847   region->init_req(1, control());
3848 
3849   C->set_has_split_ifs(true); // Has chance for split-if optimization
3850   set_result(region, phi);
3851   return true;
3852 }
3853 
3854 //-------------------------inline_primitive_Class_conversion-------------------
3855 //               Class<T> java.lang.Class                  .asPrimaryType()
3856 // public static Class<T> jdk.internal.value.PrimitiveClass.asPrimaryType(Class<T>)
3857 //               Class<T> java.lang.Class                  .asValueType()
3858 // public static Class<T> jdk.internal.value.PrimitiveClass.asValueType(Class<T>)
3859 bool LibraryCallKit::inline_primitive_Class_conversion(vmIntrinsics::ID id) {
3860   Node* mirror = argument(0); // Receiver/argument Class
3861   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3862   if (mirror_con == NULL) {
3863     return false;
3864   }
3865 
3866   bool is_val_mirror = true;
3867   ciType* tm = mirror_con->java_mirror_type(&is_val_mirror);
3868   if (tm != NULL) {
3869     Node* result = mirror;
3870     if ((id == vmIntrinsics::_asPrimaryType || id == vmIntrinsics::_asPrimaryTypeArg) && is_val_mirror) {
3871       result = _gvn.makecon(TypeInstPtr::make(tm->as_inline_klass()->ref_mirror()));
3872     } else if (id == vmIntrinsics::_asValueType || id == vmIntrinsics::_asValueTypeArg) {
3873       if (!tm->is_inlinetype()) {
3874         return false; // Throw UnsupportedOperationException
3875       } else if (!is_val_mirror) {
3876         result = _gvn.makecon(TypeInstPtr::make(tm->as_inline_klass()->val_mirror()));
3877       }
3878     }
3879     set_result(result);
3880     return true;
3881   }
3882   return false;
3883 }
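     // Folding summary for a constant mirror argument (a sketch of the cases
     // handled above, using the QMyValue/LMyValue naming from elsewhere in this file):
     //   asPrimaryType(QMyValue.class)     -> constant LMyValue (ref) mirror
     //   asPrimaryType(LMyValue.class)     -> mirror returned unchanged
     //   asValueType(LMyValue.class)       -> constant QMyValue (val) mirror
     //   asValueType on a non-inline class -> not intrinsified; the Java code
     //                                        throws UnsupportedOperationException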
3884 
3885 //-------------------------inline_Class_cast-------------------
3886 bool LibraryCallKit::inline_Class_cast() {
3887   Node* mirror = argument(0); // Class
3888   Node* obj    = argument(1);
3889   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3890   if (mirror_con == NULL) {
3891     return false;  // dead path (mirror->is_top()).
3892   }
3893   if (obj == NULL || obj->is_top()) {
3894     return false;  // dead path
3895   }
3896   const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
3897 
3898   // First, see if Class.cast() can be folded statically.
3899   // java_mirror_type() returns non-null for compile-time Class constants.
3900   bool requires_null_check = false;
3901   ciType* tm = mirror_con->java_mirror_type(&requires_null_check);
3902   if (tm != NULL && tm->is_klass() &&
3903       tp != NULL) {
3904     if (!tp->is_loaded()) {
3905       // Don't use intrinsic when class is not loaded.
3906       return false;
3907     } else {
3908       int static_res = C->static_subtype_check(TypeKlassPtr::make(tm->as_klass(), Type::trust_interfaces), tp->as_klass_type());
3909       if (static_res == Compile::SSC_always_true) {
3910         // isInstance() is true - fold the code.
3911         if (requires_null_check) {
3912           obj = null_check(obj);
3913         }
3914         set_result(obj);
3915         return true;
3916       } else if (static_res == Compile::SSC_always_false) {
3917         // Don't use intrinsic, have to throw ClassCastException.
3918         // If the reference is null, the non-intrinsic bytecode will
3919         // be optimized appropriately.
3920         return false;
3921       }
3922     }
3923   }
3924 
3925   // Bailout intrinsic and do normal inlining if exception path is frequent.
3926   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
3927     return false;
3928   }
3929 
3930   // Generate dynamic checks.
3931   // Class.cast() is the Java implementation of the _checkcast bytecode.
3932   // Do checkcast (Parse::do_checkcast()) optimizations here.
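       // At the Java level, Class.cast(obj) returns obj unchanged when obj is null
       // or isInstance(obj) holds, and throws ClassCastException otherwise; with
       // Valhalla, a cast to a value class (Q) mirror additionally rejects null,
       // which is what the _npe_path below models.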
3933 
3934   if (requires_null_check) {
3935     obj = null_check(obj);
3936   }
3937   mirror = null_check(mirror);
3938   // If mirror is dead, only null-path is taken.
3939   if (stopped()) {
3940     return true;
3941   }
3942 
3943   // Not a subtype, or the mirror's klass ptr is NULL (i.e., the mirror denotes a primitive type).
3944   enum { _bad_type_path = 1, _prim_path = 2, _npe_path = 3, PATH_LIMIT };
3945   RegionNode* region = new RegionNode(PATH_LIMIT);
3946   record_for_igvn(region);
3947 
3948   // Now load the mirror's klass metaobject, and null-check it.
3949   // If kls is null, we have a primitive mirror and
3950   // nothing is an instance of a primitive type.
3951   Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
3952 
3953   Node* res = top();
3954   Node* io = i_o();
3955   Node* mem = merged_memory();
3956   if (!stopped()) {
3957     if (EnableValhalla && !requires_null_check) {
3958       // Check if we are casting to QMyValue
3959       Node* ctrl_val_mirror = generate_fair_guard(is_val_mirror(mirror), NULL);
3960       if (ctrl_val_mirror != NULL) {
3961         RegionNode* r = new RegionNode(3);
3962         record_for_igvn(r);
3963         r->init_req(1, control());
3964 
3965         // Casting to QMyValue, check for null
3966         set_control(ctrl_val_mirror);
3967         { // PreserveJVMState because null check replaces obj in map
3968           PreserveJVMState pjvms(this);
3969           Node* null_ctr = top();
3970           null_check_oop(obj, &null_ctr);
3971           region->init_req(_npe_path, null_ctr);
3972           r->init_req(2, control());
3973         }
3974         set_control(_gvn.transform(r));
3975       }
3976     }
3977 
3978     Node* bad_type_ctrl = top();
3979     // Do checkcast optimizations.
3980     res = gen_checkcast(obj, kls, &bad_type_ctrl);
3981     region->init_req(_bad_type_path, bad_type_ctrl);
3982   }
3983   if (region->in(_prim_path) != top() ||
3984       region->in(_bad_type_path) != top() ||
3985       region->in(_npe_path) != top()) {
3986     // Let Interpreter throw ClassCastException.
3987     PreserveJVMState pjvms(this);
3988     set_control(_gvn.transform(region));
3989     // Set IO and memory because gen_checkcast may override them when buffering inline types
3990     set_i_o(io);
3991     set_all_memory(mem);
3992     uncommon_trap(Deoptimization::Reason_intrinsic,
3993                   Deoptimization::Action_maybe_recompile);
3994   }
3995   if (!stopped()) {
3996     set_result(res);
3997   }
3998   return true;
3999 }
4000 
4001 
4002 //--------------------------inline_native_subtype_check------------------------
4003 // This intrinsic takes the JNI calls out of the heart of
4004 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
4005 bool LibraryCallKit::inline_native_subtype_check() {
4006   // Pull both arguments off the stack.
4007   Node* args[2];                // two java.lang.Class mirrors: superc, subc
4008   args[0] = argument(0);
4009   args[1] = argument(1);
4010   Node* klasses[2];             // corresponding Klasses: superk, subk
4011   klasses[0] = klasses[1] = top();
4012 
4013   enum {
4014     // A full decision tree on {superc is prim, subc is prim}:
4015     _prim_0_path = 1,           // {P,N} => false
4016                                 // {P,P} & superc!=subc => false
4017     _prim_same_path,            // {P,P} & superc==subc => true
4018     _prim_1_path,               // {N,P} => false
4019     _ref_subtype_path,          // {N,N} & subtype check wins => true
4020     _both_ref_path,             // {N,N} & subtype check loses => false
4021     PATH_LIMIT
4022   };
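       // Concrete Java-level examples for the paths above (illustrative only):
       //   _prim_0_path:      int.class.isAssignableFrom(String.class)     -> false
       //                      int.class.isAssignableFrom(long.class)       -> false
       //   _prim_same_path:   int.class.isAssignableFrom(int.class)        -> true
       //   _prim_1_path:      Object.class.isAssignableFrom(int.class)     -> false
       //   _ref_subtype_path: Number.class.isAssignableFrom(Integer.class) -> true
       //   _both_ref_path:    String.class.isAssignableFrom(Integer.class) -> false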
4023 
4024   RegionNode* region = new RegionNode(PATH_LIMIT);
4025   RegionNode* prim_region = new RegionNode(2);
4026   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
4027   record_for_igvn(region);
4028   record_for_igvn(prim_region);
4029 
4030   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
4031   const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4032   int class_klass_offset = java_lang_Class::klass_offset();
4033 
4034   // First null-check both mirrors and load each mirror's klass metaobject.
4035   int which_arg;
4036   for (which_arg = 0; which_arg <= 1; which_arg++) {
4037     Node* arg = args[which_arg];
4038     arg = null_check(arg);
4039     if (stopped())  break;
4040     args[which_arg] = arg;
4041 
4042     Node* p = basic_plus_adr(arg, class_klass_offset);
4043     Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
4044     klasses[which_arg] = _gvn.transform(kls);
4045   }
4046 
4047   // Having loaded both klasses, test each for null.
4048   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4049   for (which_arg = 0; which_arg <= 1; which_arg++) {
4050     Node* kls = klasses[which_arg];
4051     Node* null_ctl = top();
4052     kls = null_check_oop(kls, &null_ctl, never_see_null);
4053     if (which_arg == 0) {
4054       prim_region->init_req(1, null_ctl);
4055     } else {
4056       region->init_req(_prim_1_path, null_ctl);
4057     }
4058     if (stopped())  break;
4059     klasses[which_arg] = kls;
4060   }
4061 
4062   if (!stopped()) {
4063     // now we have two reference types, in klasses[0..1]
4064     Node* subk   = klasses[1];  // the argument to isAssignableFrom
4065     Node* superk = klasses[0];  // the receiver
4066     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
4067     // If superc is a value mirror, we also need to check whether superc == subc, because LMyValue
4068     // is not a subtype of QMyValue, yet the subtype check passes since subk == superk.
4069     generate_fair_guard(is_val_mirror(args[0]), prim_region);
4070     // now we have a successful reference subtype check
4071     region->set_req(_ref_subtype_path, control());
4072   }
4073 
4074   // If both operands are primitive (both klasses null), then
4075   // we must return true when they are identical primitives.
4076   // It is convenient to test this after the first null klass check.
4077   // This path is also used if superc is a value mirror.
4078   set_control(_gvn.transform(prim_region));
4079   if (!stopped()) {
4080     // Since superc is primitive, make a guard for the superc==subc case.
4081     Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4082     Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4083     generate_fair_guard(bol_eq, region);
4084     if (region->req() == PATH_LIMIT+1) {
4085       // A guard was added.  If the added guard is taken, superc==subc.
4086       region->swap_edges(PATH_LIMIT, _prim_same_path);
4087       region->del_req(PATH_LIMIT);
4088     }
4089     region->set_req(_prim_0_path, control()); // Not equal after all.
4090   }
4091 
4092   // these are the only paths that produce 'true':
4093   phi->set_req(_prim_same_path,   intcon(1));
4094   phi->set_req(_ref_subtype_path, intcon(1));
4095 
4096   // pull together the cases:
4097   assert(region->req() == PATH_LIMIT, "sane region");
4098   for (uint i = 1; i < region->req(); i++) {
4099     Node* ctl = region->in(i);
4100     if (ctl == NULL || ctl == top()) {
4101       region->set_req(i, top());
4102       phi   ->set_req(i, top());
4103     } else if (phi->in(i) == NULL) {
4104       phi->set_req(i, intcon(0)); // all other paths produce 'false'
4105     }
4106   }
4107 
4108   set_control(_gvn.transform(region));
4109   set_result(_gvn.transform(phi));
4110   return true;
4111 }
4112 
4113 //---------------------generate_array_guard_common------------------------
4114 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind) {

4115 
4116   if (stopped()) {
4117     return NULL;
4118   }
4119 
4120   // Like generate_guard, adds a new path onto the region.
4121   jint  layout_con = 0;
4122   Node* layout_val = get_layout_helper(kls, layout_con);
4123   if (layout_val == NULL) {
4124     bool query = false;
4125     switch(kind) {
4126       case ObjectArray:    query = Klass::layout_helper_is_objArray(layout_con); break;
4127       case NonObjectArray: query = !Klass::layout_helper_is_objArray(layout_con); break;
4128       case TypeArray:      query = Klass::layout_helper_is_typeArray(layout_con); break;
4129       case AnyArray:       query = Klass::layout_helper_is_array(layout_con); break;
4130       case NonArray:       query = !Klass::layout_helper_is_array(layout_con); break;
4131       default:
4132         ShouldNotReachHere();
4133     }
4134     if (!query) {
4135       return NULL;                       // never a branch
4136     } else {                             // always a branch
4137       Node* always_branch = control();
4138       if (region != NULL)
4139         region->add_req(always_branch);
4140       set_control(top());
4141       return always_branch;
4142     }
4143   }
4144   unsigned int value = 0;
4145   BoolTest::mask btest = BoolTest::illegal;
4146   switch(kind) {
4147     case ObjectArray:
4148     case NonObjectArray: {
4149       value = Klass::_lh_array_tag_obj_value;
4150       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4151       btest = (kind == ObjectArray) ? BoolTest::eq : BoolTest::ne;
4152       break;
4153     }
4154     case TypeArray: {
4155       value = Klass::_lh_array_tag_type_value;
4156       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4157       btest = BoolTest::eq;
4158       break;
4159     }
4160     case AnyArray:    value = Klass::_lh_neutral_value; btest = BoolTest::lt; break;
4161     case NonArray:    value = Klass::_lh_neutral_value; btest = BoolTest::gt; break;
4162     default:
4163       ShouldNotReachHere();
4164   }
4165   // Now test the correct condition.
4166   jint nval = (jint)value;



4167   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));



4168   Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4169   return generate_fair_guard(bol, region);
4170 }
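     // Scalar equivalents of the layout-helper tests generated above (a sketch;
     // see Klass::layout_helper() for the encoding, in which arrays have a
     // negative layout helper with the array tag in the high bits):
     //   AnyArray:       lh <  Klass::_lh_neutral_value
     //   NonArray:       lh >  Klass::_lh_neutral_value
     //   ObjectArray:    (lh >> Klass::_lh_array_tag_shift) == Klass::_lh_array_tag_obj_value
     //   NonObjectArray: (lh >> Klass::_lh_array_tag_shift) != Klass::_lh_array_tag_obj_value
     //   TypeArray:      (lh >> Klass::_lh_array_tag_shift) == Klass::_lh_array_tag_type_value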
4171 
4172 
4173 //-----------------------inline_native_newArray--------------------------
4174 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
4175 // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4176 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4177   Node* mirror;
4178   Node* count_val;
4179   if (uninitialized) {
4180     mirror    = argument(1);
4181     count_val = argument(2);
4182   } else {
4183     mirror    = argument(0);
4184     count_val = argument(1);
4185   }
4186 
4187   mirror = null_check(mirror);
4188   // If mirror or obj is dead, only null-path is taken.
4189   if (stopped())  return true;
4190 
4191   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4192   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4193   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4194   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);

4299   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4300   { PreserveReexecuteState preexecs(this);
4301     jvms()->set_should_reexecute(true);
4302 
4303     array_type_mirror = null_check(array_type_mirror);
4304     original          = null_check(original);
4305 
4306     // Check if a null path was taken unconditionally.
4307     if (stopped())  return true;
4308 
4309     Node* orig_length = load_array_length(original);
4310 
4311     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
4312     klass_node = null_check(klass_node);
4313 
4314     RegionNode* bailout = new RegionNode(1);
4315     record_for_igvn(bailout);
4316 
4317     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4318     // Bail out if that is so.
4319     // An inline type array may have an object field that would require a
4320     // write barrier. Conservatively, go to the slow path.
4321     // TODO 8251971: Optimize for the case when flat src/dst are later found
4322     // to not contain oops (i.e., move this check to the macro expansion phase).
4323     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4324     const TypeAryPtr* orig_t = _gvn.type(original)->isa_aryptr();
4325     const TypeKlassPtr* tklass = _gvn.type(klass_node)->is_klassptr();
4326     bool exclude_flat = UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, false, false, BarrierSetC2::Parsing) &&
4327                         // Can src array be flat and contain oops?
4328                         (orig_t == NULL || (!orig_t->is_not_flat() && (!orig_t->is_flat() || orig_t->elem()->inline_klass()->contains_oops()))) &&
4329                         // Can dest array be flat and contain oops?
4330                         tklass->can_be_inline_array() && (!tklass->is_flat() || tklass->is_aryklassptr()->elem()->is_instklassptr()->instance_klass()->as_inline_klass()->contains_oops());
4331     Node* not_objArray = exclude_flat ? generate_non_objArray_guard(klass_node, bailout) : generate_typeArray_guard(klass_node, bailout);
4332     if (not_objArray != NULL) {
4333       // Improve the klass node's type from the new optimistic assumption:
4334       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4335       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0));
4336       Node* cast = new CastPPNode(klass_node, akls);
4337       cast->init_req(0, control());
4338       klass_node = _gvn.transform(cast);
4339     }
4340 
4341     // Bail out if either start or end is negative.
4342     generate_negative_guard(start, bailout, &start);
4343     generate_negative_guard(end,   bailout, &end);
4344 
4345     Node* length = end;
4346     if (_gvn.type(start) != TypeInt::ZERO) {
4347       length = _gvn.transform(new SubINode(end, start));
4348     }
4349 
4350     // Bail out if length is negative.
4351     // Without this check, new_array would throw
4352     // NegativeArraySizeException, but IllegalArgumentException is what
4353     // should be thrown here.
4354     generate_negative_guard(length, bailout, &length);
4355 
4356     // Handle inline type arrays
4357     bool can_validate = !too_many_traps(Deoptimization::Reason_class_check);
4358     if (!stopped()) {
4359       orig_t = _gvn.type(original)->isa_aryptr();
4360       if (orig_t != NULL && orig_t->is_flat()) {
4361         // Src is flat, check that dest is flat as well
4362         if (exclude_flat) {
4363           // Dest can't be flat, bail out
4364           bailout->add_req(control());
4365           set_control(top());
4366         } else {
4367           generate_fair_guard(flat_array_test(klass_node, /* flat = */ false), bailout);
4368         }
4369       } else if (UseFlatArray && (orig_t == NULL || !orig_t->is_not_flat()) &&
4370                  // If dest is flat, src must be flat as well (guaranteed by src <: dest check if validated).
4371                  ((!tklass->is_flat() && tklass->can_be_inline_array()) || !can_validate)) {
4372         // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
4373         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
4374         generate_fair_guard(flat_array_test(load_object_klass(original)), bailout);
4375         if (orig_t != NULL) {
4376           orig_t = orig_t->cast_to_not_flat();
4377           original = _gvn.transform(new CheckCastPPNode(control(), original, orig_t));
4378         }
4379       }
4380       if (!can_validate) {
4381         // No validation. The subtype check emitted at macro expansion time will not go to the slow
4382         // path but call checkcast_arraycopy which cannot handle flat/null-free inline type arrays.
4383         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat/null-free.
4384         generate_fair_guard(null_free_array_test(klass_node), bailout);
4385       }
4386     }
4387 
4388     if (bailout->req() > 1) {
4389       PreserveJVMState pjvms(this);
4390       set_control(_gvn.transform(bailout));
4391       uncommon_trap(Deoptimization::Reason_intrinsic,
4392                     Deoptimization::Action_maybe_recompile);
4393     }
4394 
4395     if (!stopped()) {
4396       // How many elements will we copy from the original?
4397       // The answer is MinI(orig_length - start, length).
4398       Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4399       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
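           // Worked example (illustrative): Arrays.copyOfRange(a, 2, 10) on an
           // array of length 5 allocates a new array of length 10 - 2 = 8, copies
           // moved = min(5 - 2, 8) = 3 elements, and leaves the remaining 5 slots
           // at their default value from the allocation.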
4400 
4401       // Generate a direct call to the right arraycopy function(s).
4402       // We know the copy is disjoint but we might not know if the
4403       // oop stores need checking.
4404       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
4405       // This will fail a store-check if x contains any non-nulls.
4406 
4407       // ArrayCopyNode:Ideal may transform the ArrayCopyNode to

4410       // to the copyOf to be validated, including that the copy to the
4411       // new array won't trigger an ArrayStoreException. That subtype
4412       // check can be optimized if we know something on the type of
4413       // the input array from type speculation.
4414       if (_gvn.type(klass_node)->singleton()) {
4415         const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4416         const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4417 
4418         int test = C->static_subtype_check(superk, subk);
4419         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4420           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4421           if (t_original->speculative_type() != NULL) {
4422             original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4423           }
4424         }
4425       }
4426 
4427       bool validated = false;
4428       // Reason_class_check rather than Reason_intrinsic because we
4429       // want to intrinsify even if this traps.
4430       if (can_validate) {
4431         Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4432 
4433         if (not_subtype_ctrl != top()) {
4434           PreserveJVMState pjvms(this);
4435           set_control(not_subtype_ctrl);
4436           uncommon_trap(Deoptimization::Reason_class_check,
4437                         Deoptimization::Action_make_not_entrant);
4438           assert(stopped(), "Should be stopped");
4439         }
4440         validated = true;
4441       }
4442 
4443       if (!stopped()) {
4444         newcopy = new_array(klass_node, length, 0);  // no arguments to push
4445 
4446         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
4447                                                 load_object_klass(original), klass_node);
4448         if (!is_copyOfRange) {
4449           ac->set_copyof(validated);
4450         } else {

4549   set_edges_for_java_call(slow_call);
4550   return slow_call;
4551 }
4552 
4553 
4554 /**
4555  * Build special case code for calls to hashCode on an object. This call may
4556  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4557  * slightly different code.
4558  */
4559 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4560   assert(is_static == callee()->is_static(), "correct intrinsic selection");
4561   assert(!(is_virtual && is_static), "either virtual, special, or static");
4562 
4563   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4564 
4565   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4566   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
4567   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
4568   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4569   Node* obj = argument(0);
4570 
4571   if (gvn().type(obj)->is_inlinetypeptr()) {
4572     return false;
4573   }
4574 
4575   if (!is_static) {
4576     // Check for hashing null object
4577     obj = null_check_receiver();
4578     if (stopped())  return true;        // unconditionally null
4579     result_reg->init_req(_null_path, top());
4580     result_val->init_req(_null_path, top());
4581   } else {
4582     // Do a null check, and return zero if null.
4583     // System.identityHashCode(null) == 0

4584     Node* null_ctl = top();
4585     obj = null_check_oop(obj, &null_ctl);
4586     result_reg->init_req(_null_path, null_ctl);
4587     result_val->init_req(_null_path, _gvn.intcon(0));
4588   }
4589 
4590   // Unconditionally null?  Then return right away.
4591   if (stopped()) {
4592     set_control( result_reg->in(_null_path));
4593     if (!stopped())
4594       set_result(result_val->in(_null_path));
4595     return true;
4596   }
4597 
4598   // We only go to the fast case code if we pass a number of guards.  The
4599   // paths which do not pass are accumulated in the slow_region.
4600   RegionNode* slow_region = new RegionNode(1);
4601   record_for_igvn(slow_region);
4602 
4603   // If this is a virtual call, we generate a funny guard.  We pull out
4604   // the vtable entry corresponding to hashCode() from the target object.
4605   // If the target method which we are calling happens to be the native
4606   // Object hashCode() method, we pass the guard.  We do not need this
4607   // guard for non-virtual calls -- the caller is known to be the native
4608   // Object hashCode().
4609   if (is_virtual) {
4610     // After null check, get the object's klass.
4611     Node* obj_klass = load_object_klass(obj);
4612     generate_virtual_guard(obj_klass, slow_region);
4613   }
4614 
4615   // Get the header out of the object, use LoadMarkNode when available
4616   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4617   // The control of the load must be NULL. Otherwise, the load can move before
4618   // the null check after castPP removal.
4619   Node* no_ctrl = NULL;
4620   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4621 
4622   // Test the header to see if it is unlocked.
4623   // This also serves as guard against inline types
4624   Node *lock_mask      = _gvn.MakeConX(markWord::inline_type_mask_in_place);
4625   Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4626   Node *unlocked_val   = _gvn.MakeConX(markWord::unlocked_value);
4627   Node *chk_unlocked   = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
4628   Node *test_unlocked  = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
4629 
4630   generate_slow_guard(test_unlocked, slow_region);
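       // In scalar terms the test above routes control to slow_region when
       //
       //   (header & markWord::inline_type_mask_in_place) != markWord::unlocked_value
       //
       // i.e. when the object is locked (or its header is displaced) or it is an
       // inline type (a sketch; see markWord.hpp for the bit layout).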
4631 
4632   // Get the hash value and check to see that it has been properly assigned.
4633   // We depend on hash_mask being at most 32 bits and avoid the use of
4634   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4635   // vm: see markWord.hpp.
4636   Node *hash_mask      = _gvn.intcon(markWord::hash_mask);
4637   Node *hash_shift     = _gvn.intcon(markWord::hash_shift);
4638   Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
4639   // This hack lets the hash bits live anywhere in the mark object now, as long
4640   // as the shift drops the relevant bits into the low 32 bits.  Note that
4641   // the Java spec says that hashCode is an int, so there's no point in capturing
4642   // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
4643   hshifted_header      = ConvX2I(hshifted_header);
4644   Node *hash_val       = _gvn.transform(new AndINode(hshifted_header, hash_mask));
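       // In scalar terms the three nodes above compute (a sketch, assuming the
       // mark word layout documented in markWord.hpp):
       //
       //   jint hash_val = (jint)((uintptr_t)header >> markWord::hash_shift) & markWord::hash_mask;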

4670     // this->control() comes from set_results_for_java_call
4671     result_reg->init_req(_slow_path, control());
4672     result_val->init_req(_slow_path, slow_result);
4673     result_io  ->set_req(_slow_path, i_o());
4674     result_mem ->set_req(_slow_path, reset_memory());
4675   }
4676 
4677   // Return the combined state.
4678   set_i_o(        _gvn.transform(result_io)  );
4679   set_all_memory( _gvn.transform(result_mem));
4680 
4681   set_result(result_reg, result_val);
4682   return true;
4683 }
4684 
4685 //---------------------------inline_native_getClass----------------------------
4686 // public final native Class<?> java.lang.Object.getClass();
4687 //
4688 // Build special case code for calls to getClass on an object.
4689 bool LibraryCallKit::inline_native_getClass() {
4690   Node* obj = argument(0);
4691   if (obj->is_InlineType()) {
4692     const Type* t = _gvn.type(obj);
4693     if (t->maybe_null()) {
4694       null_check(obj);
4695     }
4696     set_result(makecon(TypeInstPtr::make(t->inline_klass()->java_mirror())));
4697     return true;
4698   }
4699   obj = null_check_receiver();
4700   if (stopped())  return true;
4701   set_result(load_mirror_from_klass(load_object_klass(obj)));
4702   return true;
4703 }
4704 
4705 //-----------------inline_native_Reflection_getCallerClass---------------------
4706 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4707 //
4708 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4709 //
4710 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4711 // in that it must skip particular security frames and checks for
4712 // caller sensitive methods.
4713 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4714 #ifndef PRODUCT
4715   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4716     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4717   }
4718 #endif
4719 

5044 //  not cloneable or finalizer => slow path to out-of-line Object.clone
5045 //
5046 // The general case has two steps, allocation and copying.
5047 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5048 //
5049 // Copying also has two cases, oop arrays and everything else.
5050 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5051 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5052 //
5053 // These steps fold up nicely if and when the cloned object's klass
5054 // can be sharply typed as an object array, a type array, or an instance.
5055 //
5056 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5057   PhiNode* result_val;
5058 
5059   // Set the reexecute bit for the interpreter to reexecute
5060   // the bytecode that invokes Object.clone if deoptimization happens.
5061   { PreserveReexecuteState preexecs(this);
5062     jvms()->set_should_reexecute(true);
5063 
5064     Node* obj = argument(0);
5065     obj = null_check_receiver();
5066     if (stopped())  return true;
5067 
5068     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
5069 
5070     // If we are going to clone an instance, we need its exact type to
5071     // know the number and types of fields to convert the clone to
5072     // loads/stores. Maybe a speculative type can help us.
5073     if (!obj_type->klass_is_exact() &&
5074         obj_type->speculative_type() != NULL &&
5075         obj_type->speculative_type()->is_instance_klass() &&
5076         !obj_type->speculative_type()->is_inlinetype()) {
5077       ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5078       if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5079           !spec_ik->has_injected_fields()) {
5080         if (!obj_type->isa_instptr() ||
5081             obj_type->is_instptr()->instance_klass()->has_subklass()) {
5082           obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5083         }
5084       }
5085     }
5086 
5087     // Conservatively insert a memory barrier on all memory slices.
5088     // Do not let writes into the original float below the clone.
5089     insert_mem_bar(Op_MemBarCPUOrder);
5090 
5091     // paths into result_reg:
5092     enum {
5093       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
5094       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
5095       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
5096       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
5097       PATH_LIMIT
5098     };
5099     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5100     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5101     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
5102     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5103     record_for_igvn(result_reg);
5104 
5105     Node* obj_klass = load_object_klass(obj);
5106     // We only go to the fast case code if we pass a number of guards.
5107     // The paths which do not pass are accumulated in the slow_region.
5108     RegionNode* slow_region = new RegionNode(1);
5109     record_for_igvn(slow_region);
5110 
5111     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
5112     if (array_ctl != NULL) {
5113       // It's an array.
5114       PreserveJVMState pjvms(this);
5115       set_control(array_ctl);



5116 
5117       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5118       const TypeAryPtr* ary_ptr = obj_type->isa_aryptr();
5119       if (UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Expansion) &&
5120           obj_type->can_be_inline_array() &&
5121           (ary_ptr == NULL || (!ary_ptr->is_not_flat() && (!ary_ptr->is_flat() || ary_ptr->elem()->inline_klass()->contains_oops())))) {
5122         // A flattened inline type array may have an object field that would require a
5123         // write barrier. Conservatively, go to the slow path.
5124         generate_fair_guard(flat_array_test(obj_klass), slow_region);
5125       }
5126 
5127       if (!stopped()) {
5128         Node* obj_length = load_array_length(obj);
5129         Node* obj_size  = NULL;
5130         Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size, /*deoptimize_on_exception=*/true);
5131 
5132         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5133         if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5134           // If it is an oop array, it requires very special treatment,
5135           // because gc barriers are required when accessing the array.
5136           Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
5137           if (is_obja != NULL) {
5138             PreserveJVMState pjvms2(this);
5139             set_control(is_obja);
5140             // Generate a direct call to the right arraycopy function(s).
5141             // Clones are always tightly coupled.
5142             ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5143             ac->set_clone_oop_array();
5144             Node* n = _gvn.transform(ac);
5145             assert(n == ac, "cannot disappear");
5146             ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5147 
5148             result_reg->init_req(_objArray_path, control());
5149             result_val->init_req(_objArray_path, alloc_obj);
5150             result_i_o ->set_req(_objArray_path, i_o());
5151             result_mem ->set_req(_objArray_path, reset_memory());
5152           }
5153         }
5154         // Otherwise, there are no barriers to worry about.
5155         // (We can dispense with card marks if we know the allocation
5156         //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
5157         //  causes the non-eden paths to take compensating steps to
5158         //  simulate a fresh allocation, so that no further
5159         //  card marks are required in compiled code to initialize
5160         //  the object.)
5161 
5162         if (!stopped()) {
5163           copy_to_clone(obj, alloc_obj, obj_size, true);
5164 
5165           // Present the results of the copy.
5166           result_reg->init_req(_array_path, control());
5167           result_val->init_req(_array_path, alloc_obj);
5168           result_i_o ->set_req(_array_path, i_o());
5169           result_mem ->set_req(_array_path, reset_memory());
5170         }
5171       }
5172     }
5173 




5174     if (!stopped()) {
5175       // It's an instance (we did array above).  Make the slow-path tests.
5176       // If this is a virtual call, we generate a funny guard.  We grab
5177       // the vtable entry corresponding to clone() from the target object.
5178       // If the target method which we are calling happens to be the
5179       // Object clone() method, we pass the guard.  We do not need this
5180       // guard for non-virtual calls; the caller is known to be the native
5181       // Object clone().
5182       if (is_virtual) {
5183         generate_virtual_guard(obj_klass, slow_region);
5184       }
5185 
5186       // The object must be easily cloneable and must not have a finalizer.
5187       // Both of these conditions may be checked in a single test.
5188       // We could optimize the test further, but we don't care.
5189       generate_access_flags_guard(obj_klass,
5190                                   // Test both conditions:
5191                                   JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
5192                                   // Must be cloneable but not finalizer:
5193                                   JVM_ACC_IS_CLONEABLE_FAST,

5314 // array in the heap that GCs wouldn't expect. Move the allocation
5315 // after the traps so we don't allocate the array if we
5316 // deoptimize. This is possible because tightly_coupled_allocation()
5317 // guarantees there's no observer of the allocated array at this point
5318 // and the control flow is simple enough.
5319 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms,
5320                                                     int saved_reexecute_sp, uint new_idx) {
5321   if (saved_jvms != NULL && !stopped()) {
5322     assert(alloc != NULL, "only with a tightly coupled allocation");
5323     // restore JVM state to the state at the arraycopy
5324     saved_jvms->map()->set_control(map()->control());
5325     assert(saved_jvms->map()->memory() == map()->memory(), "memory state changed?");
5326     assert(saved_jvms->map()->i_o() == map()->i_o(), "IO state changed?");
5327     // If we've improved the types of some nodes (null check) while
5328     // emitting the guards, propagate them to the current state
5329     map()->replaced_nodes().apply(saved_jvms->map(), new_idx);
5330     set_jvms(saved_jvms);
5331     _reexecute_sp = saved_reexecute_sp;
5332 
5333     // Remove the allocation from above the guards
5334     CallProjections* callprojs = alloc->extract_projections(true);

5335     InitializeNode* init = alloc->initialization();
5336     Node* alloc_mem = alloc->in(TypeFunc::Memory);
5337     C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5338     C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
5339 
5340     // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5341     // the allocation (i.e. is only valid if the allocation succeeds):
5342     // 1) replace CastIINode with AllocateArrayNode's length here
5343     // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
5344     //
5345     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate a
5346     // new, separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy).
5347     Node* init_control = init->proj_out(TypeFunc::Control);
5348     Node* alloc_length = alloc->Ideal_length();
5349 #ifdef ASSERT
5350     Node* prev_cast = NULL;
5351 #endif
5352     for (uint i = 0; i < init_control->outcnt(); i++) {
5353       Node* init_out = init_control->raw_out(i);
5354       if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5355 #ifdef ASSERT
5356         if (prev_cast == NULL) {
5357           prev_cast = init_out;

5359           if (!prev_cast->cmp(*init_out)) {
5360             prev_cast->dump();
5361             init_out->dump();
5362             assert(false, "not equal CastIINode");
5363           }
5364         }
5365 #endif
5366         C->gvn_replace_by(init_out, alloc_length);
5367       }
5368     }
5369     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5370 
5371     // move the allocation here (after the guards)
5372     _gvn.hash_delete(alloc);
5373     alloc->set_req(TypeFunc::Control, control());
5374     alloc->set_req(TypeFunc::I_O, i_o());
5375     Node *mem = reset_memory();
5376     set_all_memory(mem);
5377     alloc->set_req(TypeFunc::Memory, mem);
5378     set_control(init->proj_out_or_null(TypeFunc::Control));
5379     set_i_o(callprojs->fallthrough_ioproj);
5380 
5381     // Update memory as done in GraphKit::set_output_for_allocation()
5382     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5383     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5384     if (ary_type->isa_aryptr() && length_type != NULL) {
5385       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5386     }
5387     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5388     int            elemidx  = C->get_alias_index(telemref);
5389     set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
5390     set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
5391 
5392     Node* allocx = _gvn.transform(alloc);
5393     assert(allocx == alloc, "where has the allocation gone?");
5394     assert(dest->is_CheckCastPP(), "not an allocation result?");
5395 
5396     _gvn.hash_delete(dest);
5397     dest->set_req(0, control());
5398     Node* destx = _gvn.transform(dest);
5399     assert(destx == dest, "where has the allocation result gone?");

5508         top_src  = src_type->isa_aryptr();
5509         has_src = (top_src != NULL && top_src->elem() != Type::BOTTOM);
5510         src_spec = true;
5511       }
5512       if (!has_dest) {
5513         dest = maybe_cast_profiled_obj(dest, dest_k, true);
5514         dest_type  = _gvn.type(dest);
5515         top_dest  = dest_type->isa_aryptr();
5516         has_dest = (top_dest != NULL && top_dest->elem() != Type::BOTTOM);
5517         dest_spec = true;
5518       }
5519     }
5520   }
5521 
5522   if (has_src && has_dest && can_emit_guards) {
5523     BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
5524     BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
5525     if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
5526     if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
5527 
5528     if (src_elem == dest_elem && top_src->is_flat() == top_dest->is_flat() && src_elem == T_OBJECT) {
5529       // If both arrays are object arrays then having the exact types
5530       // for both will remove the need for a subtype check at runtime
5531       // before the call and may make it possible to pick a faster copy
5532       // routine (without a subtype check on every element)
5533       // Do we have the exact type of src?
5534       bool could_have_src = src_spec;
5535       // Do we have the exact type of dest?
5536       bool could_have_dest = dest_spec;
5537       ciKlass* src_k = NULL;
5538       ciKlass* dest_k = NULL;
5539       if (!src_spec) {
5540         src_k = src_type->speculative_type_not_null();
5541         if (src_k != NULL && src_k->is_array_klass()) {
5542           could_have_src = true;
5543         }
5544       }
5545       if (!dest_spec) {
5546         dest_k = dest_type->speculative_type_not_null();
5547         if (dest_k != NULL && dest_k->is_array_klass()) {
5548           could_have_dest = true;
5549         }
5550       }
5551       if (could_have_src && could_have_dest) {
5552         // If we can have both exact types, emit the missing guards
5553         if (could_have_src && !src_spec) {
5554           src = maybe_cast_profiled_obj(src, src_k, true);
5555           src_type = _gvn.type(src);
5556           top_src = src_type->isa_aryptr();
5557         }
5558         if (could_have_dest && !dest_spec) {
5559           dest = maybe_cast_profiled_obj(dest, dest_k, true);
5560           dest_type = _gvn.type(dest);
5561           top_dest = dest_type->isa_aryptr();
5562         }
5563       }
5564     }
5565   }
5566 
5567   ciMethod* trap_method = method();
5568   int trap_bci = bci();
5569   if (saved_jvms != NULL) {
5570     trap_method = alloc->jvms()->method();
5571     trap_bci = alloc->jvms()->bci();
5572   }
5573 
5574   bool negative_length_guard_generated = false;
5575 
5576   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5577       can_emit_guards && !src->is_top() && !dest->is_top()) {

5578     // Validate arguments: this enables transformation of the ArrayCopyNode.
5579     validated = true;
5580 
5581     RegionNode* slow_region = new RegionNode(1);
5582     record_for_igvn(slow_region);
5583 
5584     // (1) src and dest are arrays.
5585     generate_non_array_guard(load_object_klass(src), slow_region);
5586     generate_non_array_guard(load_object_klass(dest), slow_region);
5587 
5588     // (2) src and dest arrays must have elements of the same BasicType
5589     // done at macro expansion or at Ideal transformation time
5590 
5591     // (4) src_offset must not be negative.
5592     generate_negative_guard(src_offset, slow_region);
5593 
5594     // (5) dest_offset must not be negative.
5595     generate_negative_guard(dest_offset, slow_region);
5596 
5597     // (7) src_offset + length must not exceed length of src.

5600                          slow_region);
5601 
5602     // (8) dest_offset + length must not exceed length of dest.
5603     generate_limit_guard(dest_offset, length,
5604                          load_array_length(dest),
5605                          slow_region);
5606 
5607     // (6) length must not be negative.
5608     // This is also checked in generate_arraycopy() during macro expansion, but
5609     // we have to check it here as well for the case where the ArrayCopyNode will
5610     // be eliminated by Escape Analysis.
5611     if (EliminateAllocations) {
5612       generate_negative_guard(length, slow_region);
5613       negative_length_guard_generated = true;
5614     }
5615 
5616     // (9) each element of an oop array must be assignable
5617     Node* dest_klass = load_object_klass(dest);
5618     if (src != dest) {
5619       Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
5620       slow_region->add_req(not_subtype_ctrl);
5621     }
5622 
5623     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
5624     const Type* toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
5625     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
5626     src_type = _gvn.type(src);
5627     top_src  = src_type->isa_aryptr();
5628 
5629     // Handle flat inline type arrays (null-free arrays are handled by the subtype check above)
5630     if (!stopped() && UseFlatArray) {
5631       // If dest is flat, src must be flat as well (guaranteed by src <: dest check). Handle flat src here.
5632       assert(top_dest == NULL || !top_dest->is_flat() || top_src->is_flat(), "src array must be flat");
5633       if (top_src != NULL && top_src->is_flat()) {
5634         // Src is flat, check that dest is flat as well
5635         if (top_dest != NULL && !top_dest->is_flat()) {
5636           generate_fair_guard(flat_array_test(dest_klass, /* flat = */ false), slow_region);
5637           // Since dest is flat and src <: dest, dest must have the same type as src.
5638           top_dest = top_src->cast_to_exactness(false);
5639           assert(top_dest->is_flat(), "dest must be flat");
5640           dest = _gvn.transform(new CheckCastPPNode(control(), dest, top_dest));
5641         }
5642       } else if (top_src == NULL || !top_src->is_not_flat()) {
5643         // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
5644         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
5645         assert(top_dest == NULL || !top_dest->is_flat(), "dest array must not be flat");
5646         generate_fair_guard(flat_array_test(src), slow_region);
5647         if (top_src != NULL) {
5648           top_src = top_src->cast_to_not_flat();
5649           src = _gvn.transform(new CheckCastPPNode(control(), src, top_src));
5650         }
5651       }
5652     }
5653 
5654     {
5655       PreserveJVMState pjvms(this);
5656       set_control(_gvn.transform(slow_region));
5657       uncommon_trap(Deoptimization::Reason_intrinsic,
5658                     Deoptimization::Action_make_not_entrant);
5659       assert(stopped(), "Should be stopped");
5660     }




5661   }
5662 
5663   arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp, new_idx);
5664 
5665   if (stopped()) {
5666     return true;
5667   }
5668 
5669   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != NULL, negative_length_guard_generated,
5670                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
5671                                           // so the compiler has a chance to eliminate them: during macro expansion,
5672                                           // we have to set their control (CastPP nodes are eliminated).
5673                                           load_object_klass(src), load_object_klass(dest),
5674                                           load_array_length(src), load_array_length(dest));
5675 
5676   ac->set_arraycopy(validated);
5677 
5678   Node* n = _gvn.transform(ac);
5679   if (n == ac) {
5680     ac->connect_outputs(this);
< prev index next >