src/hotspot/share/opto/library_call.cpp

   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"

  27 #include "ci/ciUtilities.inline.hpp"
  28 #include "classfile/vmIntrinsics.hpp"
  29 #include "compiler/compileBroker.hpp"
  30 #include "compiler/compileLog.hpp"
  31 #include "gc/shared/barrierSet.hpp"
  32 #include "jfr/support/jfrIntrinsics.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "oops/klass.inline.hpp"
  35 #include "oops/objArrayKlass.hpp"
  36 #include "opto/addnode.hpp"
  37 #include "opto/arraycopynode.hpp"
  38 #include "opto/c2compiler.hpp"
  39 #include "opto/castnode.hpp"
  40 #include "opto/cfgnode.hpp"
  41 #include "opto/convertnode.hpp"
  42 #include "opto/countbitsnode.hpp"
  43 #include "opto/idealKit.hpp"
  44 #include "opto/library_call.hpp"
  45 #include "opto/mathexactnode.hpp"
  46 #include "opto/mulnode.hpp"

 300   case vmIntrinsics::_indexOfUL:                return inline_string_indexOf(StrIntrinsicNode::UL);
 301   case vmIntrinsics::_indexOfIL:                return inline_string_indexOfI(StrIntrinsicNode::LL);
 302   case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
 303   case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
 304   case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar(StrIntrinsicNode::U);
 305   case vmIntrinsics::_indexOfL_char:            return inline_string_indexOfChar(StrIntrinsicNode::L);
 306 
 307   case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);
 308   case vmIntrinsics::_equalsU:                  return inline_string_equals(StrIntrinsicNode::UU);
 309 
 310   case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
 311   case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
 312   case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
 313   case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);
 314 
 315   case vmIntrinsics::_compressStringC:
 316   case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
 317   case vmIntrinsics::_inflateStringC:
 318   case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
 319 


 320   case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false);
 321   case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN,  Relaxed, false);
 322   case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,     Relaxed, false);
 323   case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,    Relaxed, false);
 324   case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,     Relaxed, false);
 325   case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,      Relaxed, false);
 326   case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,     Relaxed, false);
 327   case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,    Relaxed, false);
 328   case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,   Relaxed, false);

 329 
 330   case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false);
 331   case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN,  Relaxed, false);
 332   case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,     Relaxed, false);
 333   case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,    Relaxed, false);
 334   case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,     Relaxed, false);
 335   case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,      Relaxed, false);
 336   case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,     Relaxed, false);
 337   case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,    Relaxed, false);
 338   case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,   Relaxed, false);

 339 
 340   case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,   Volatile, false);
 341   case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN,  Volatile, false);
 342   case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,     Volatile, false);
 343   case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,    Volatile, false);
 344   case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,     Volatile, false);
 345   case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,      Volatile, false);
 346   case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,     Volatile, false);
 347   case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,    Volatile, false);
 348   case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,   Volatile, false);
 349 
 350   case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,   Volatile, false);
 351   case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN,  Volatile, false);
 352   case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,     Volatile, false);
 353   case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,    Volatile, false);
 354   case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,     Volatile, false);
 355   case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,      Volatile, false);
 356   case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,     Volatile, false);
 357   case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,    Volatile, false);
 358   case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,   Volatile, false);

 492   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 493   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 494   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 495   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 496   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 497 
 498   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 499   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);
 500 
 501   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
 502 
 503   case vmIntrinsics::_isInstance:
 504   case vmIntrinsics::_getModifiers:
 505   case vmIntrinsics::_isInterface:
 506   case vmIntrinsics::_isArray:
 507   case vmIntrinsics::_isPrimitive:
 508   case vmIntrinsics::_isHidden:
 509   case vmIntrinsics::_getSuperclass:
 510   case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());
 511 





 512   case vmIntrinsics::_floatToRawIntBits:
 513   case vmIntrinsics::_floatToIntBits:
 514   case vmIntrinsics::_intBitsToFloat:
 515   case vmIntrinsics::_doubleToRawLongBits:
 516   case vmIntrinsics::_doubleToLongBits:
 517   case vmIntrinsics::_longBitsToDouble:         return inline_fp_conversions(intrinsic_id());
 518 
 519   case vmIntrinsics::_floatIsInfinite:
 520   case vmIntrinsics::_doubleIsInfinite:         return inline_fp_range_check(intrinsic_id());
 521 
 522   case vmIntrinsics::_numberOfLeadingZeros_i:
 523   case vmIntrinsics::_numberOfLeadingZeros_l:
 524   case vmIntrinsics::_numberOfTrailingZeros_i:
 525   case vmIntrinsics::_numberOfTrailingZeros_l:
 526   case vmIntrinsics::_bitCount_i:
 527   case vmIntrinsics::_bitCount_l:
 528   case vmIntrinsics::_reverse_i:
 529   case vmIntrinsics::_reverse_l:
 530   case vmIntrinsics::_reverseBytes_i:
 531   case vmIntrinsics::_reverseBytes_l:

2146     case vmIntrinsics::_remainderUnsigned_l: {
2147       zero_check_long(argument(2));
2148       // Compile-time detection of a zero divisor
2149       if (stopped()) {
2150         return true; // keep the graph constructed so far
2151       }
2152       n = new UModLNode(control(), argument(0), argument(2));
2153       break;
2154     }
2155     default:  fatal_unexpected_iid(id);  break;
2156   }
2157   set_result(_gvn.transform(n));
2158   return true;
2159 }
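
// A minimal standalone sketch (plain C++, not HotSpot IR; all names below are
// hypothetical) of the semantics the UModLNode above models: unsigned 64-bit
// remainder, with the zero-divisor case routed away first, just as
// zero_check_long() does before the node is created.
#include <cstdint>

static bool remainder_unsigned_l(uint64_t dividend, uint64_t divisor, uint64_t* result) {
  if (divisor == 0) {
    return false;                  // intrinsic path: handled by zero_check_long()
  }
  *result = dividend % divisor;    // matches Long.remainderUnsigned semantics
  return true;
}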
2160 
2161 //----------------------------inline_unsafe_access----------------------------
2162 
2163 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2164   // Attempt to infer a sharper value type from the offset and base type.
2165   ciKlass* sharpened_klass = NULL;

2166 
2167   // See if it is an instance field, with an object type.
2168   if (alias_type->field() != NULL) {
2169     if (alias_type->field()->type()->is_klass()) {
2170       sharpened_klass = alias_type->field()->type()->as_klass();

2171     }
2172   }
2173 
2174   const TypeOopPtr* result = NULL;
2175   // See if it is a narrow oop array.
2176   if (adr_type->isa_aryptr()) {
2177     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2178       const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();

2179       if (elem_type != NULL && elem_type->is_loaded()) {
2180         // Sharpen the value type.
2181         result = elem_type;
2182       }
2183     }
2184   }
2185 
2186   // The sharpened class might be unloaded if there is no class loader
2187   // constraint in place.
2188   if (result == NULL && sharpened_klass != NULL && sharpened_klass->is_loaded()) {
2189     // Sharpen the value type.
2190     result = TypeOopPtr::make_from_klass(sharpened_klass);



2191   }
2192   if (result != NULL) {
2193 #ifndef PRODUCT
2194     if (C->print_intrinsics() || C->print_inlining()) {
2195       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2196       tty->print("  sharpened value: ");  result->dump();    tty->cr();
2197     }
2198 #endif
2199   }
2200   return result;
2201 }
2202 
2203 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2204   switch (kind) {
2205       case Relaxed:
2206         return MO_UNORDERED;
2207       case Opaque:
2208         return MO_RELAXED;
2209       case Acquire:
2210         return MO_ACQUIRE;

2225   guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2226   assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2227 
2228   if (is_reference_type(type)) {
2229     decorators |= ON_UNKNOWN_OOP_REF;
2230   }
2231 
2232   if (unaligned) {
2233     decorators |= C2_UNALIGNED;
2234   }
2235 
2236 #ifndef PRODUCT
2237   {
2238     ResourceMark rm;
2239     // Check the signatures.
2240     ciSignature* sig = callee()->signature();
2241 #ifdef ASSERT
2242     if (!is_store) {
2243       // Object getReference(Object base, int/long offset), etc.
2244       BasicType rtype = sig->return_type()->basic_type();
2245       assert(rtype == type, "getter must return the expected value");
2246       assert(sig->count() == 2, "oop getter has 2 arguments");
2247       assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2248       assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2249     } else {
2250       // void putReference(Object base, int/long offset, Object x), etc.
2251       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2252       assert(sig->count() == 3, "oop putter has 3 arguments");
2253       assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2254       assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2255       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2256       assert(vtype == type, "putter must accept the expected value");
2257     }
2258 #endif // ASSERT
2259  }
2260 #endif //PRODUCT
2261 
2262   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2263 
2264   Node* receiver = argument(0);  // type: oop
2265 
2266   // Build address expression.
2267   Node* heap_base_oop = top();
2268 
2269   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2270   Node* base = argument(1);  // type: oop
2271   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2272   Node* offset = argument(2);  // type: long
2273   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2274   // to be plain byte offsets, which are also the same as those accepted
2275   // by oopDesc::field_addr.
2276   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2277          "fieldOffset must be byte-scaled");
2278   // 32-bit machines ignore the high half!
2279   offset = ConvL2X(offset);
2280 
2281   // Save state and restore on bailout
2282   uint old_sp = sp();
2283   SafePointNode* old_map = clone_map();
2284 
2285   Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2286 
2287   if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
2288     if (type != T_OBJECT) {
2289       decorators |= IN_NATIVE; // off-heap primitive access
2290     } else {
2291       set_map(old_map);
2292       set_sp(old_sp);
2293       return false; // off-heap oop accesses are not supported
2294     }
2295   } else {
2296     heap_base_oop = base; // on-heap or mixed access
2297   }
2298 
2299   // Can base be NULL? Otherwise, always on-heap access.
2300   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2301 
2302   if (!can_access_non_heap) {
2303     decorators |= IN_HEAP;
2304   }
2305 
2306   Node* val = is_store ? argument(4) : NULL;
2307 
2308   const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2309   if (adr_type == TypePtr::NULL_PTR) {
2310     set_map(old_map);
2311     set_sp(old_sp);
2312     return false; // off-heap access with zero address
2313   }
2314 
2315   // Try to categorize the address.
2316   Compile::AliasType* alias_type = C->alias_type(adr_type);
2317   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2318 
2319   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2320       alias_type->adr_type() == TypeAryPtr::RANGE) {
2321     set_map(old_map);
2322     set_sp(old_sp);
2323     return false; // not supported
2324   }
2325 
2326   bool mismatched = false;
2327   BasicType bt = alias_type->basic_type();
2328   if (bt != T_ILLEGAL) {
2329     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2330     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2331       // Alias type doesn't differentiate between byte[] and boolean[].
2332       // Use address type to get the element type.
2333       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2334     }
2335     if (is_reference_type(bt, true)) {
2336       // accessing an array field with getReference is not a mismatch
2337       bt = T_OBJECT;
2338     }
2339     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2340       // Don't intrinsify mismatched object accesses
2341       set_map(old_map);
2342       set_sp(old_sp);
2343       return false;
2344     }
2345     mismatched = (bt != type);
2346   } else if (alias_type->adr_type()->isa_oopptr()) {
2347     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2348   }
2349 
2350   old_map->destruct(&_gvn);
2351   assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2352 
2353   if (mismatched) {
2354     decorators |= C2_MISMATCHED;
2355   }
2356 
2357   // First guess at the value type.
2358   const Type *value_type = Type::get_const_basic_type(type);
2359 
2360   // Figure out the memory ordering.
2361   decorators |= mo_decorator_for_access_kind(kind);
2362 
2363   if (!is_store && type == T_OBJECT) {
2364     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2365     if (tjp != NULL) {
2366       value_type = tjp;




2367     }
2368   }
2369 
2370   receiver = null_check(receiver);
2371   if (stopped()) {
2372     return true;
2373   }
2374   // Heap pointers get a null-check from the interpreter,
2375   // as a courtesy.  However, this is not guaranteed by Unsafe,
2376   // and it is not possible to fully distinguish unintended nulls
2377   // from intended ones in this API.
2378 
2379   if (!is_store) {
2380     Node* p = NULL;
2381     // Try to constant fold a load from a constant field
2382     ciField* field = alias_type->field();
2383     if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) {
2384       // final or stable field
2385       p = make_constant_from_field(field, heap_base_oop);
2386     }
2387 
2388     if (p == NULL) { // Could not constant fold the load
2389       p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2390       // Normalize the value returned by getBoolean in the following cases
2391       if (type == T_BOOLEAN &&
2392           (mismatched ||
2393            heap_base_oop == top() ||                  // - heap_base_oop is NULL or
2394            (can_access_non_heap && field == NULL))    // - heap_base_oop is potentially NULL
2395                                                       //   and the unsafe access is made at a large offset
2396                                                       //   (i.e., larger than the maximum offset necessary for any
2397                                                       //   field access)
2398             ) {
2399           IdealKit ideal = IdealKit(this);
2400 #define __ ideal.
2401           IdealVariable normalized_result(ideal);
2402           __ declarations_done();
2403           __ set(normalized_result, p);
2404           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2405           __ set(normalized_result, ideal.ConI(1));
2406           ideal.end_if();
2407           final_sync(ideal);
2408           p = __ value(normalized_result);
2409 #undef __
2410       }
2411     }
2412     if (type == T_ADDRESS) {
2413       p = gvn().transform(new CastP2XNode(NULL, p));
2414       p = ConvX2UL(p);
2415     }
2416     // The load node has the control of the preceding MemBarCPUOrder.  All
2417     // following nodes will have the control of the MemBarCPUOrder inserted at
2418     // the end of this method.  So, pushing the load onto the stack at a later
2419     // point is fine.
2420     set_result(p);
2421   } else {
2422     if (bt == T_ADDRESS) {
2423       // Repackage the long as a pointer.
2424       val = ConvL2X(val);
2425       val = gvn().transform(new CastX2PNode(val));
2426     }
2427     access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2428   }
2429 
2430   return true;
2431 }
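
// A standalone sketch (plain C++, hypothetical names) of the T_BOOLEAN
// normalization built with IdealKit above: any non-zero byte read by
// getBoolean is canonicalized to 1, zero stays 0.
#include <cstdint>

static int32_t normalize_boolean_load(int8_t raw) {
  int32_t normalized = raw;        // __ set(normalized_result, p)
  if (raw != 0) {                  // __ if_then(p, BoolTest::ne, ConI(0))
    normalized = 1;                // __ set(normalized_result, ConI(1))
  }
  return normalized;               // __ value(normalized_result)
}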
2432 
2433 //----------------------------inline_unsafe_load_store----------------------------
2434 // This method serves a couple of different customers (depending on LoadStoreKind):
2435 //
2436 // LS_cmp_swap:
2437 //
2438 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2439 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2440 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2441 //
2442 // LS_cmp_swap_weak:
2443 //
2444 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2445 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2446 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2447 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2448 //
2449 //   boolean weakCompareAndSetInt(          Object o, long offset, int    expected, int    x);
2450 //   boolean weakCompareAndSetIntPlain(     Object o, long offset, int    expected, int    x);
2451 //   boolean weakCompareAndSetIntAcquire(   Object o, long offset, int    expected, int    x);
2452 //   boolean weakCompareAndSetIntRelease(   Object o, long offset, int    expected, int    x);

2618     }
2619     case LS_cmp_swap:
2620     case LS_cmp_swap_weak:
2621     case LS_get_add:
2622       break;
2623     default:
2624       ShouldNotReachHere();
2625   }
2626 
2627   // Null check receiver.
2628   receiver = null_check(receiver);
2629   if (stopped()) {
2630     return true;
2631   }
2632 
2633   int alias_idx = C->get_alias_index(adr_type);
2634 
2635   if (is_reference_type(type)) {
2636     decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2637 
2638     // Transformation of a value which could be NULL pointer (CastPP #NULL)
2639     // could be delayed during Parse (for example, in adjust_map_after_if()).
2640     // Execute transformation here to avoid barrier generation in such case.
2641     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2642       newval = _gvn.makecon(TypePtr::NULL_PTR);
2643 
2644     if (oldval != NULL && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2645       // Refine the value to a null constant, when it is known to be null
2646       oldval = _gvn.makecon(TypePtr::NULL_PTR);
2647     }
2648   }
2649 
2650   Node* result = NULL;
2651   switch (kind) {
2652     case LS_cmp_exchange: {
2653       result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2654                                             oldval, newval, value_type, type, decorators);
2655       break;
2656     }
2657     case LS_cmp_swap_weak:

2779   Node* cls = null_check(argument(1));
2780   if (stopped())  return true;
2781 
2782   Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
2783   kls = null_check(kls);
2784   if (stopped())  return true;  // argument was like int.class
2785 
2786   Node* test = NULL;
2787   if (LibraryCallKit::klass_needs_init_guard(kls)) {
2788     // Note:  The argument might still be an illegal value like
2789     // Serializable.class or Object[].class.   The runtime will handle it.
2790     // But we must make an explicit check for initialization.
2791     Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
2792     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
2793     // can generate code to load it as unsigned byte.
2794     Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
2795     Node* bits = intcon(InstanceKlass::fully_initialized);
2796     test = _gvn.transform(new SubINode(inst, bits));
2797     // The 'test' is non-zero if we need to take a slow path.
2798   }
2799 
2800   Node* obj = new_instance(kls, test);





2801   set_result(obj);
2802   return true;
2803 }
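
// A standalone sketch (plain C++) of the initialization guard above; the value
// of fully_initialized is illustrative -- the real constant is
// InstanceKlass::fully_initialized. The fast allocation path is taken only when
// the subtraction yields zero, i.e. the klass has finished initialization.
static const int fully_initialized = 4;           // illustrative terminal class state

static bool needs_init_slow_path(unsigned char init_state) {
  int test = (int)init_state - fully_initialized; // mirrors the SubINode "test"
  return test != 0;                               // non-zero => take the slow path
}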
2804 
2805 //------------------------inline_native_time_funcs--------------
2806 // inline code for System.currentTimeMillis() and System.nanoTime()
2807 // these have the same type and signature
2808 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
2809   const TypeFunc* tf = OptoRuntime::void_long_Type();
2810   const TypePtr* no_memory_effects = NULL;
2811   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
2812   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
2813 #ifdef ASSERT
2814   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
2815   assert(value_top == top(), "second value must be top");
2816 #endif
2817   set_result(value);
2818   return true;
2819 }
2820 

3356   ciKlass *objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3357   const TypeOopPtr *etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3358 
3359   bool xk = etype->klass_is_exact();
3360 
3361   Node* thread = _gvn.transform(new ThreadLocalNode());
3362   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::extentLocalCache_offset()));
3363   return _gvn.transform(LoadNode::make(_gvn, NULL, immutable_memory(), p, p->bottom_type()->is_ptr(),
3364         TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3365 }
3366 
3367 //------------------------inline_native_extentLocalCache------------------
3368 bool LibraryCallKit::inline_native_extentLocalCache() {
3369   ciKlass *objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3370   const TypeOopPtr *etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3371   const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
3372 
3373   // Because we create the extentLocal cache lazily we have to make the
3374   // type of the result BotPTR.
3375   bool xk = etype->klass_is_exact();
3376   const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
3377   Node* cache_obj_handle = extentLocalCache_helper();
3378   set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3379 
3380   return true;
3381 }
3382 
3383 //------------------------inline_native_setExtentLocalCache------------------
3384 bool LibraryCallKit::inline_native_setExtentLocalCache() {
3385   Node* arr = argument(0);
3386   Node* cache_obj_handle = extentLocalCache_helper();
3387 
3388   const TypePtr *adr_type = _gvn.type(cache_obj_handle)->isa_ptr();
3389   store_to_memory(control(), cache_obj_handle, arr, T_OBJECT, adr_type,
3390                   MemNode::unordered);
3391 
3392   return true;
3393 }
3394 
3395 //---------------------------load_mirror_from_klass----------------------------
3396 // Given a klass oop, load its java mirror (a java.lang.Class oop).
3397 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
3398   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
3399   Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3400   // mirror = ((OopHandle)mirror)->resolve();
3401   return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
3402 }
3403 
3404 //-----------------------load_klass_from_mirror_common-------------------------
3405 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3406 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3407 // and branch to the given path on the region.
3408 // If never_see_null, take an uncommon trap on null, so we can optimistically
3409 // compile for the non-null case.
3410 // If the region is NULL, force never_see_null = true.
3411 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3412                                                     bool never_see_null,
3413                                                     RegionNode* region,
3414                                                     int null_path,
3415                                                     int offset) {
3416   if (region == NULL)  never_see_null = true;
3417   Node* p = basic_plus_adr(mirror, offset);
3418   const TypeKlassPtr*  kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3419   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3420   Node* null_ctl = top();
3421   kls = null_check_oop(kls, &null_ctl, never_see_null);
3422   if (region != NULL) {
3423     // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).

3426     assert(null_ctl == top(), "no loose ends");
3427   }
3428   return kls;
3429 }
3430 
3431 //--------------------(inline_native_Class_query helpers)---------------------
3432 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
3433 // Fall through if (mods & mask) == bits, take the guard otherwise.
3434 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3435   // Branch around if the given klass has the given modifier bit set.
3436   // Like generate_guard, adds a new path onto the region.
3437   Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3438   Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered);
3439   Node* mask = intcon(modifier_mask);
3440   Node* bits = intcon(modifier_bits);
3441   Node* mbit = _gvn.transform(new AndINode(mods, mask));
3442   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
3443   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3444   return generate_fair_guard(bol, region);
3445 }

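// Note: with modifier_bits == 0 the fall-through is the "flag clear" case, so the
// control returned by the two helpers below is the path on which the klass *is*
// an interface (resp. a hidden class).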
3446 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3447   return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3448 }
3449 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3450   return generate_access_flags_guard(kls, JVM_ACC_IS_HIDDEN_CLASS, 0, region);
3451 }
3452 
3453 //-------------------------inline_native_Class_query-------------------
3454 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3455   const Type* return_type = TypeInt::BOOL;
3456   Node* prim_return_value = top();  // what happens if it's a primitive class?
3457   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3458   bool expect_prim = false;     // most of these guys expect to work on refs
3459 
3460   enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3461 
3462   Node* mirror = argument(0);
3463   Node* obj    = top();
3464 
3465   switch (id) {

3619 
3620   case vmIntrinsics::_getClassAccessFlags:
3621     p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3622     query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
3623     break;
3624 
3625   default:
3626     fatal_unexpected_iid(id);
3627     break;
3628   }
3629 
3630   // Fall-through is the normal case of a query to a real class.
3631   phi->init_req(1, query_value);
3632   region->init_req(1, control());
3633 
3634   C->set_has_split_ifs(true); // Has chance for split-if optimization
3635   set_result(region, phi);
3636   return true;
3637 }
3638 
3639 //-------------------------inline_Class_cast-------------------
3640 bool LibraryCallKit::inline_Class_cast() {
3641   Node* mirror = argument(0); // Class
3642   Node* obj    = argument(1);
3643   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3644   if (mirror_con == NULL) {
3645     return false;  // dead path (mirror->is_top()).
3646   }
3647   if (obj == NULL || obj->is_top()) {
3648     return false;  // dead path
3649   }
3650   const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
3651 
3652   // First, see if Class.cast() can be folded statically.
3653   // java_mirror_type() returns non-null for compile-time Class constants.
3654   ciType* tm = mirror_con->java_mirror_type();

3655   if (tm != NULL && tm->is_klass() &&
3656       tp != NULL) {
3657     if (!tp->is_loaded()) {
3658       // Don't use intrinsic when class is not loaded.
3659       return false;
3660     } else {
3661       int static_res = C->static_subtype_check(TypeKlassPtr::make(tm->as_klass()), tp->as_klass_type());
3662       if (static_res == Compile::SSC_always_true) {
3663         // isInstance() is true - fold the code.



3664         set_result(obj);
3665         return true;
3666       } else if (static_res == Compile::SSC_always_false) {
3667         // Don't use intrinsic, have to throw ClassCastException.
3668         // If the reference is null, the non-intrinsic bytecode will
3669         // be optimized appropriately.
3670         return false;
3671       }
3672     }
3673   }
3674 
3675   // Bailout intrinsic and do normal inlining if exception path is frequent.
3676   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
3677     return false;
3678   }
3679 
3680   // Generate dynamic checks.
3681   // Class.cast() is java implementation of _checkcast bytecode.
3682   // Do checkcast (Parse::do_checkcast()) optimizations here.
3683 



3684   mirror = null_check(mirror);
3685   // If mirror is dead, only null-path is taken.
3686   if (stopped()) {
3687     return true;
3688   }
3689 
3690   // Not-subtype or the mirror's klass ptr is NULL (in case it is a primitive).
3691   enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
3692   RegionNode* region = new RegionNode(PATH_LIMIT);
3693   record_for_igvn(region);
3694 
3695   // Now load the mirror's klass metaobject, and null-check it.
3696   // If kls is null, we have a primitive mirror and
3697   // nothing is an instance of a primitive type.
3698   Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
3699 
3700   Node* res = top();
3701   if (!stopped()) {
3702     Node* bad_type_ctrl = top();
3703     // Do checkcast optimizations.
3704     res = gen_checkcast(obj, kls, &bad_type_ctrl);
3705     region->init_req(_bad_type_path, bad_type_ctrl);
3706   }
3707   if (region->in(_prim_path) != top() ||
3708       region->in(_bad_type_path) != top()) {

3709     // Let Interpreter throw ClassCastException.
3710     PreserveJVMState pjvms(this);
3711     set_control(_gvn.transform(region));
3712     uncommon_trap(Deoptimization::Reason_intrinsic,
3713                   Deoptimization::Action_maybe_recompile);
3714   }
3715   if (!stopped()) {
3716     set_result(res);
3717   }
3718   return true;
3719 }
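
// A standalone sketch (plain C++, hypothetical enum and function names) of the
// three-way decision that static_subtype_check() drives in inline_Class_cast()
// above: a statically provable cast folds to the object itself, a statically
// impossible cast is left to the throwing bytecode, and everything else gets
// the dynamic checkcast with a bad-type trap.
enum StaticSubtypeResult { always_true, always_false, needs_runtime_test };
enum ClassCastPlan       { fold_to_object, dont_intrinsify, emit_dynamic_check };

static ClassCastPlan plan_class_cast(StaticSubtypeResult static_res) {
  switch (static_res) {
    case always_true:  return fold_to_object;      // set_result(obj)
    case always_false: return dont_intrinsify;     // would always throw ClassCastException
    default:           return emit_dynamic_check;  // gen_checkcast() + uncommon trap
  }
}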
3720 
3721 
3722 //--------------------------inline_native_subtype_check------------------------
3723 // This intrinsic takes the JNI calls out of the heart of
3724 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
3725 bool LibraryCallKit::inline_native_subtype_check() {
3726   // Pull both arguments off the stack.
3727   Node* args[2];                // two java.lang.Class mirrors: superc, subc
3728   args[0] = argument(0);
3729   args[1] = argument(1);
3730   Node* klasses[2];             // corresponding Klasses: superk, subk
3731   klasses[0] = klasses[1] = top();
3732 
3733   enum {
3734     // A full decision tree on {superc is prim, subc is prim}:
3735     _prim_0_path = 1,           // {P,N} => false
3736                                 // {P,P} & superc!=subc => false
3737     _prim_same_path,            // {P,P} & superc==subc => true
3738     _prim_1_path,               // {N,P} => false
3739     _ref_subtype_path,          // {N,N} & subtype check wins => true
3740     _both_ref_path,             // {N,N} & subtype check loses => false
3741     PATH_LIMIT
3742   };
3743 
3744   RegionNode* region = new RegionNode(PATH_LIMIT);

3745   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
3746   record_for_igvn(region);

3747 
3748   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
3749   const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3750   int class_klass_offset = java_lang_Class::klass_offset();
3751 
3752   // First null-check both mirrors and load each mirror's klass metaobject.
3753   int which_arg;
3754   for (which_arg = 0; which_arg <= 1; which_arg++) {
3755     Node* arg = args[which_arg];
3756     arg = null_check(arg);
3757     if (stopped())  break;
3758     args[which_arg] = arg;
3759 
3760     Node* p = basic_plus_adr(arg, class_klass_offset);
3761     Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
3762     klasses[which_arg] = _gvn.transform(kls);
3763   }
3764 
3765   // Having loaded both klasses, test each for null.
3766   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3767   for (which_arg = 0; which_arg <= 1; which_arg++) {
3768     Node* kls = klasses[which_arg];
3769     Node* null_ctl = top();
3770     kls = null_check_oop(kls, &null_ctl, never_see_null);
3771     int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
3772     region->init_req(prim_path, null_ctl);



3773     if (stopped())  break;
3774     klasses[which_arg] = kls;
3775   }
3776 
3777   if (!stopped()) {
3778     // now we have two reference types, in klasses[0..1]
3779     Node* subk   = klasses[1];  // the argument to isAssignableFrom
3780     Node* superk = klasses[0];  // the receiver
3781     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));



3782     // now we have a successful reference subtype check
3783     region->set_req(_ref_subtype_path, control());
3784   }
3785 
3786   // If both operands are primitive (both klasses null), then
3787   // we must return true when they are identical primitives.
3788   // It is convenient to test this after the first null klass check.
3789   set_control(region->in(_prim_0_path)); // go back to first null check

3790   if (!stopped()) {
3791     // Since superc is primitive, make a guard for the superc==subc case.
3792     Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
3793     Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
3794     generate_guard(bol_eq, region, PROB_FAIR);
3795     if (region->req() == PATH_LIMIT+1) {
3796       // A guard was added.  If the added guard is taken, superc==subc.
3797       region->swap_edges(PATH_LIMIT, _prim_same_path);
3798       region->del_req(PATH_LIMIT);
3799     }
3800     region->set_req(_prim_0_path, control()); // Not equal after all.
3801   }
3802 
3803   // these are the only paths that produce 'true':
3804   phi->set_req(_prim_same_path,   intcon(1));
3805   phi->set_req(_ref_subtype_path, intcon(1));
3806 
3807   // pull together the cases:
3808   assert(region->req() == PATH_LIMIT, "sane region");
3809   for (uint i = 1; i < region->req(); i++) {
3810     Node* ctl = region->in(i);
3811     if (ctl == NULL || ctl == top()) {
3812       region->set_req(i, top());
3813       phi   ->set_req(i, top());
3814     } else if (phi->in(i) == NULL) {
3815       phi->set_req(i, intcon(0)); // all other paths produce 'false'
3816     }
3817   }
3818 
3819   set_control(_gvn.transform(region));
3820   set_result(_gvn.transform(phi));
3821   return true;
3822 }
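
// A standalone sketch (plain C++, hypothetical names) of the decision tree the
// region/phi above encodes for isAssignableFrom: primitive mirrors are only
// assignable from themselves, and two reference mirrors reduce to the generated
// klass subtype check.
static bool is_assignable_from(bool superc_is_prim, bool subc_is_prim,
                               bool same_mirror, bool ref_subtype_check_wins) {
  if (superc_is_prim || subc_is_prim) {
    // _prim_0_path / _prim_1_path / _prim_same_path
    return superc_is_prim && subc_is_prim && same_mirror;
  }
  // _ref_subtype_path (true) vs. _both_ref_path (false)
  return ref_subtype_check_wins;
}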
3823 
3824 //---------------------generate_array_guard_common------------------------
3825 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
3826                                                   bool obj_array, bool not_array) {
3827 
3828   if (stopped()) {
3829     return NULL;
3830   }
3831 
3832   // If obj_array/not_array==false/false:
3833   // Branch around if the given klass is in fact an array (either obj or prim).
3834   // If obj_array/not_array==false/true:
3835   // Branch around if the given klass is not an array klass of any kind.
3836   // If obj_array/not_array==true/true:
3837   // Branch around if the kls is not an oop array (kls is int[], String, etc.)
3838   // If obj_array/not_array==true/false:
3839   // Branch around if the kls is an oop array (Object[] or subtype)
3840   //
3841   // Like generate_guard, adds a new path onto the region.
3842   jint  layout_con = 0;
3843   Node* layout_val = get_layout_helper(kls, layout_con);
3844   if (layout_val == NULL) {
3845     bool query = (obj_array
3846                   ? Klass::layout_helper_is_objArray(layout_con)
3847                   : Klass::layout_helper_is_array(layout_con));
3848     if (query == not_array) {







3849       return NULL;                       // never a branch
3850     } else {                             // always a branch
3851       Node* always_branch = control();
3852       if (region != NULL)
3853         region->add_req(always_branch);
3854       set_control(top());
3855       return always_branch;
3856     }
3857   }
3858   // Now test the correct condition.
3859   jint  nval = (obj_array
3860                 ? (jint)(Klass::_lh_array_tag_type_value
3861                    <<    Klass::_lh_array_tag_shift)
3862                 : Klass::_lh_neutral_value);
3863   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
3864   BoolTest::mask btest = BoolTest::lt;  // correct for testing is_[obj]array
3865   // invert the test if we are looking for a non-array
3866   if (not_array)  btest = BoolTest(btest).negate();
3867   Node* bol = _gvn.transform(new BoolNode(cmp, btest));
3868   return generate_fair_guard(bol, region);
3869 }
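
// A standalone sketch (plain C++) of the signed-comparison trick used above.
// The constants are illustrative stand-ins for the Klass::_lh_* values: array
// layout helpers carry a negative tag in the top bits, and object-array tags
// sort below type-array tags, so the single CmpI/BoolTest::lt against the right
// threshold answers either query.
#include <cstdint>

static const int32_t lh_neutral_value  = 0;                    // instances are non-negative
static const int32_t lh_objArray_tag   = (int32_t)0x80000000;  // illustrative obj-array tag
static const int32_t lh_typeArray_tag  = (int32_t)0xC0000000;  // illustrative type-array tag

static bool layout_helper_is_array(int32_t lh)    { return lh < lh_neutral_value; }
static bool layout_helper_is_objArray(int32_t lh) { return lh < lh_typeArray_tag; }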
3870 
3871 
3872 //-----------------------inline_native_newArray--------------------------
3873 // private static native Object java.lang.reflect.newArray(Class<?> componentType, int length);
3874 // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
3875 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
3876   Node* mirror;
3877   Node* count_val;
3878   if (uninitialized) {
3879     mirror    = argument(1);
3880     count_val = argument(2);
3881   } else {
3882     mirror    = argument(0);
3883     count_val = argument(1);
3884   }
3885 
3886   mirror = null_check(mirror);
3887   // If mirror or obj is dead, only null-path is taken.
3888   if (stopped())  return true;
3889 
3890   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
3891   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
3892   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
3893   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);

3998   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
3999   { PreserveReexecuteState preexecs(this);
4000     jvms()->set_should_reexecute(true);
4001 
4002     array_type_mirror = null_check(array_type_mirror);
4003     original          = null_check(original);
4004 
4005     // Check if a null path was taken unconditionally.
4006     if (stopped())  return true;
4007 
4008     Node* orig_length = load_array_length(original);
4009 
4010     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
4011     klass_node = null_check(klass_node);
4012 
4013     RegionNode* bailout = new RegionNode(1);
4014     record_for_igvn(bailout);
4015 
4016     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4017     // Bail out if that is so.
4018     Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
4019     if (not_objArray != NULL) {
4020       // Improve the klass node's type from the new optimistic assumption:
4021       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4022       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
4023       Node* cast = new CastPPNode(klass_node, akls);
4024       cast->init_req(0, control());
4025       klass_node = _gvn.transform(cast);
4026     }
4027 
4028     // Bail out if either start or end is negative.
4029     generate_negative_guard(start, bailout, &start);
4030     generate_negative_guard(end,   bailout, &end);
4031 
4032     Node* length = end;
4033     if (_gvn.type(start) != TypeInt::ZERO) {
4034       length = _gvn.transform(new SubINode(end, start));
4035     }
4036 
4037     // Bail out if length is negative.
4038     // Without this, new_array would throw
4039     // NegativeArraySizeException, but IllegalArgumentException is what
4040     // should be thrown.
4041     generate_negative_guard(length, bailout, &length);
4042 
4043     if (bailout->req() > 1) {
4044       PreserveJVMState pjvms(this);
4045       set_control(_gvn.transform(bailout));
4046       uncommon_trap(Deoptimization::Reason_intrinsic,
4047                     Deoptimization::Action_maybe_recompile);
4048     }
4049 
4050     if (!stopped()) {
4051       // How many elements will we copy from the original?
4052       // The answer is MinI(orig_length - start, length).
4053       Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4054       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
4055 
4056       // Generate a direct call to the right arraycopy function(s).
4057       // We know the copy is disjoint but we might not know if the
4058       // oop stores need checking.
4059       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
4060       // This will fail a store-check if x contains any non-nulls.
4061 
4062       // ArrayCopyNode:Ideal may transform the ArrayCopyNode to

4065       // to the copyOf to be validated, including that the copy to the
4066       // new array won't trigger an ArrayStoreException. That subtype
4067       // check can be optimized if we know something on the type of
4068       // the input array from type speculation.
4069       if (_gvn.type(klass_node)->singleton()) {
4070         const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4071         const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4072 
4073         int test = C->static_subtype_check(superk, subk);
4074         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4075           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4076           if (t_original->speculative_type() != NULL) {
4077             original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4078           }
4079         }
4080       }
4081 
4082       bool validated = false;
4083       // Reason_class_check rather than Reason_intrinsic because we
4084       // want to intrinsify even if this traps.
4085       if (!too_many_traps(Deoptimization::Reason_class_check)) {
4086         Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4087 
4088         if (not_subtype_ctrl != top()) {
4089           PreserveJVMState pjvms(this);
4090           set_control(not_subtype_ctrl);
4091           uncommon_trap(Deoptimization::Reason_class_check,
4092                         Deoptimization::Action_make_not_entrant);
4093           assert(stopped(), "Should be stopped");
4094         }
4095         validated = true;
4096       }
4097 
4098       if (!stopped()) {
4099         newcopy = new_array(klass_node, length, 0);  // no arguments to push
4100 
4101         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
4102                                                 load_object_klass(original), klass_node);
4103         if (!is_copyOfRange) {
4104           ac->set_copyof(validated);
4105         } else {

4204   set_edges_for_java_call(slow_call);
4205   return slow_call;
4206 }
4207 
4208 
4209 /**
4210  * Build special case code for calls to hashCode on an object. This call may
4211  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4212  * slightly different code.
4213  */
4214 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4215   assert(is_static == callee()->is_static(), "correct intrinsic selection");
4216   assert(!(is_virtual && is_static), "either virtual, special, or static");
4217 
4218   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4219 
4220   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4221   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
4222   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
4223   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4224   Node* obj = NULL;





4225   if (!is_static) {
4226     // Check for hashing null object
4227     obj = null_check_receiver();
4228     if (stopped())  return true;        // unconditionally null
4229     result_reg->init_req(_null_path, top());
4230     result_val->init_req(_null_path, top());
4231   } else {
4232     // Do a null check, and return zero if null.
4233     // System.identityHashCode(null) == 0
4234     obj = argument(0);
4235     Node* null_ctl = top();
4236     obj = null_check_oop(obj, &null_ctl);
4237     result_reg->init_req(_null_path, null_ctl);
4238     result_val->init_req(_null_path, _gvn.intcon(0));
4239   }
4240 
4241   // Unconditionally null?  Then return right away.
4242   if (stopped()) {
4243     set_control( result_reg->in(_null_path));
4244     if (!stopped())
4245       set_result(result_val->in(_null_path));
4246     return true;
4247   }
4248 
4249   // We only go to the fast case code if we pass a number of guards.  The
4250   // paths which do not pass are accumulated in the slow_region.
4251   RegionNode* slow_region = new RegionNode(1);
4252   record_for_igvn(slow_region);
4253 
4254   // If this is a virtual call, we generate a funny guard.  We pull out
4255   // the vtable entry corresponding to hashCode() from the target object.
4256   // If the target method which we are calling happens to be the native
4257   // Object hashCode() method, we pass the guard.  We do not need this
4258   // guard for non-virtual calls -- the caller is known to be the native
4259   // Object hashCode().
4260   if (is_virtual) {
4261     // After null check, get the object's klass.
4262     Node* obj_klass = load_object_klass(obj);
4263     generate_virtual_guard(obj_klass, slow_region);
4264   }
4265 
4266   // Get the header out of the object, use LoadMarkNode when available
4267   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4268   // The control of the load must be NULL. Otherwise, the load can move before
4269   // the null check after castPP removal.
4270   Node* no_ctrl = NULL;
4271   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4272 
4273   // Test the header to see if it is unlocked.
4274   Node *lock_mask      = _gvn.MakeConX(markWord::lock_mask_in_place);

4275   Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4276   Node *unlocked_val   = _gvn.MakeConX(markWord::unlocked_value);
4277   Node *chk_unlocked   = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
4278   Node *test_unlocked  = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
4279 
4280   generate_slow_guard(test_unlocked, slow_region);
4281 
4282   // Get the hash value and check to see that it has been properly assigned.
4283   // We depend on hash_mask being at most 32 bits and avoid the use of
4284   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4285   // vm: see markWord.hpp.
4286   Node *hash_mask      = _gvn.intcon(markWord::hash_mask);
4287   Node *hash_shift     = _gvn.intcon(markWord::hash_shift);
4288   Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
4289   // This hack lets the hash bits live anywhere in the mark object now, as long
4290   // as the shift drops the relevant bits into the low 32 bits.  Note that
4291   // Java spec says that HashCode is an int so there's no point in capturing
4292   // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
4293   hshifted_header      = ConvX2I(hshifted_header);
4294   Node *hash_val       = _gvn.transform(new AndINode(hshifted_header, hash_mask));

4320     // this->control() comes from set_results_for_java_call
4321     result_reg->init_req(_slow_path, control());
4322     result_val->init_req(_slow_path, slow_result);
4323     result_io  ->set_req(_slow_path, i_o());
4324     result_mem ->set_req(_slow_path, reset_memory());
4325   }
4326 
4327   // Return the combined state.
4328   set_i_o(        _gvn.transform(result_io)  );
4329   set_all_memory( _gvn.transform(result_mem));
4330 
4331   set_result(result_reg, result_val);
4332   return true;
4333 }
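
// A standalone sketch (plain C++) of the fast path assembled above. The mark
// word layout constants are illustrative stand-ins for the markWord values:
// check the low lock bits for "unlocked", then shift and mask out the identity
// hash; a zero hash means it has not been assigned and the slow path must run.
#include <cstdint>

static const uint64_t lock_mask      = 0x3;         // illustrative lock bits
static const uint64_t unlocked_value = 0x1;         // illustrative "unlocked" pattern
static const int      hash_shift     = 8;           // illustrative hash position
static const uint64_t hash_mask      = 0x7FFFFFFF;  // hash fits in an int

static bool fast_identity_hash(uint64_t mark, uint32_t* hash) {
  if ((mark & lock_mask) != unlocked_value) {
    return false;                                   // locked: take the slow path
  }
  uint32_t h = (uint32_t)((mark >> hash_shift) & hash_mask);
  if (h == 0) {
    return false;                                   // hash not assigned yet: slow path
  }
  *hash = h;
  return true;
}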
4334 
4335 //---------------------------inline_native_getClass----------------------------
4336 // public final native Class<?> java.lang.Object.getClass();
4337 //
4338 // Build special case code for calls to getClass on an object.
4339 bool LibraryCallKit::inline_native_getClass() {
4340   Node* obj = null_check_receiver();









4341   if (stopped())  return true;
4342   set_result(load_mirror_from_klass(load_object_klass(obj)));
4343   return true;
4344 }
4345 
4346 //-----------------inline_native_Reflection_getCallerClass---------------------
4347 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4348 //
4349 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4350 //
4351 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4352 // in that it must skip particular security frames and checks for
4353 // caller sensitive methods.
4354 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4355 #ifndef PRODUCT
4356   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4357     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4358   }
4359 #endif
4360 

4677 //  not cloneable or finalizer => slow path to out-of-line Object.clone
4678 //
4679 // The general case has two steps, allocation and copying.
4680 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
4681 //
4682 // Copying also has two cases, oop arrays and everything else.
4683 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
4684 // Everything else uses the tight inline loop supplied by CopyArrayNode.
4685 //
4686 // These steps fold up nicely if and when the cloned object's klass
4687 // can be sharply typed as an object array, a type array, or an instance.
4688 //
4689 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
4690   PhiNode* result_val;
4691 
4692   // Set the reexecute bit for the interpreter to reexecute
4693   // the bytecode that invokes Object.clone if deoptimization happens.
4694   { PreserveReexecuteState preexecs(this);
4695     jvms()->set_should_reexecute(true);
4696 
4697     Node* obj = null_check_receiver();





4698     if (stopped())  return true;
4699 
4700     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
4701 
4702     // If we are going to clone an instance, we need its exact type to
4703     // know the number and types of fields to convert the clone to
4704     // loads/stores. Maybe a speculative type can help us.
4705     if (!obj_type->klass_is_exact() &&
4706         obj_type->speculative_type() != NULL &&
4707         obj_type->speculative_type()->is_instance_klass()) {

4708       ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
4709       if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
4710           !spec_ik->has_injected_fields()) {
4711         if (!obj_type->isa_instptr() ||
4712             obj_type->is_instptr()->instance_klass()->has_subklass()) {
4713           obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
4714         }
4715       }
4716     }
4717 
4718     // Conservatively insert a memory barrier on all memory slices.
4719     // Do not let writes into the original float below the clone.
4720     insert_mem_bar(Op_MemBarCPUOrder);
4721 
4722     // paths into result_reg:
4723     enum {
4724       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
4725       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
4726       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
4727       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
4728       PATH_LIMIT
4729     };
4730     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4731     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4732     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
4733     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4734     record_for_igvn(result_reg);
4735 
4736     Node* obj_klass = load_object_klass(obj);
4737     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
4738     if (array_ctl != NULL) {
4739       // It's an array.
4740       PreserveJVMState pjvms(this);
4741       set_control(array_ctl);
4742       Node* obj_length = load_array_length(obj);
4743       Node* obj_size  = NULL;
4744       Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size, /*deoptimize_on_exception=*/true);
4745 
4746       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4747       if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
4748         // If it is an oop array, it requires very special treatment,
4749         // because gc barriers are required when accessing the array.
4750         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
4751         if (is_obja != NULL) {
4752           PreserveJVMState pjvms2(this);
4753           set_control(is_obja);
4754           // Generate a direct call to the right arraycopy function(s).
4755           // Clones are always tightly coupled.
4756           ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
4757           ac->set_clone_oop_array();
4758           Node* n = _gvn.transform(ac);
4759           assert(n == ac, "cannot disappear");
4760           ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
4761 
4762           result_reg->init_req(_objArray_path, control());
4763           result_val->init_req(_objArray_path, alloc_obj);
4764           result_i_o ->set_req(_objArray_path, i_o());
4765           result_mem ->set_req(_objArray_path, reset_memory());
4766         }
4767       }
4768       // Otherwise, there are no barriers to worry about.
4769       // (We can dispense with card marks if we know the allocation
4770       //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
4771       //  causes the non-eden paths to take compensating steps to
4772       //  simulate a fresh allocation, so that no further
4773       //  card marks are required in compiled code to initialize
4774       //  the object.)
4775 
4776       if (!stopped()) {
4777         copy_to_clone(obj, alloc_obj, obj_size, true);
4778 
4779         // Present the results of the copy.
4780         result_reg->init_req(_array_path, control());
4781         result_val->init_req(_array_path, alloc_obj);
4782         result_i_o ->set_req(_array_path, i_o());
4783         result_mem ->set_req(_array_path, reset_memory());
4784       }
4785     }
4786 
4787     // We only go to the instance fast case code if we pass a number of guards.
4788     // The paths which do not pass are accumulated in the slow_region.
4789     RegionNode* slow_region = new RegionNode(1);
4790     record_for_igvn(slow_region);
4791     if (!stopped()) {
4792       // It's an instance (we did array above).  Make the slow-path tests.
4793       // If this is a virtual call, we generate a funny guard.  We grab
4794       // the vtable entry corresponding to clone() from the target object.
4795       // If the target method which we are calling happens to be the
4796       // Object clone() method, we pass the guard.  We do not need this
4797       // guard for non-virtual calls; the caller is known to be the native
4798       // Object clone().
4799       if (is_virtual) {
4800         generate_virtual_guard(obj_klass, slow_region);
4801       }
4802 
4803       // The object must be easily cloneable and must not have a finalizer.
4804       // Both of these conditions may be checked in a single test.
4805       // We could optimize the test further, but we don't care.
4806       generate_access_flags_guard(obj_klass,
4807                                   // Test both conditions:
4808                                   JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
4809                                   // Must be cloneable but not finalizer:
4810                                   JVM_ACC_IS_CLONEABLE_FAST,

4931 // array in the heap that GCs wouldn't expect. Move the allocation
4932 // after the traps so we don't allocate the array if we
4933 // deoptimize. This is possible because tightly_coupled_allocation()
4934 // guarantees there's no observer of the allocated array at this point
4935 // and the control flow is simple enough.
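// Editor's note: an illustrative before/after of the reordering performed here
// (control flow only, assuming a tightly coupled allocation):
//
//   before:  AllocateArray -> Initialize -> guards (may deopt) -> ArrayCopy
//   after:   guards (may deopt) -> AllocateArray -> Initialize -> ArrayCopy
//
// so if one of the guards deoptimizes, the destination array is never
// allocated and the interpreter re-executes the bytecode from a clean state.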
4936 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms,
4937                                                     int saved_reexecute_sp, uint new_idx) {
4938   if (saved_jvms != NULL && !stopped()) {
4939     assert(alloc != NULL, "only with a tightly coupled allocation");
4940     // restore JVM state to the state at the arraycopy
4941     saved_jvms->map()->set_control(map()->control());
4942     assert(saved_jvms->map()->memory() == map()->memory(), "memory state changed?");
4943     assert(saved_jvms->map()->i_o() == map()->i_o(), "IO state changed?");
4944     // If we've improved the types of some nodes (null check) while
4945     // emitting the guards, propagate them to the current state
4946     map()->replaced_nodes().apply(saved_jvms->map(), new_idx);
4947     set_jvms(saved_jvms);
4948     _reexecute_sp = saved_reexecute_sp;
4949 
4950     // Remove the allocation from above the guards
4951     CallProjections callprojs;
4952     alloc->extract_projections(&callprojs, true);
4953     InitializeNode* init = alloc->initialization();
4954     Node* alloc_mem = alloc->in(TypeFunc::Memory);
4955     C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
4956     C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
4957 
4958     // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
4959     // the allocation (i.e. is only valid if the allocation succeeds):
4960     // 1) replace CastIINode with AllocateArrayNode's length here
4961     // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
4962     //
4963     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
4964     // a new separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy).
4965     Node* init_control = init->proj_out(TypeFunc::Control);
4966     Node* alloc_length = alloc->Ideal_length();
4967 #ifdef ASSERT
4968     Node* prev_cast = NULL;
4969 #endif
4970     for (uint i = 0; i < init_control->outcnt(); i++) {
4971       Node* init_out = init_control->raw_out(i);
4972       if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
4973 #ifdef ASSERT
4974         if (prev_cast == NULL) {
4975           prev_cast = init_out;
4976         } else {
4977           if (prev_cast->cmp(*init_out) == false) {
4978             prev_cast->dump();
4979             init_out->dump();
4980             assert(false, "not equal CastIINode");
4981           }
4982         }
4983 #endif
4984         C->gvn_replace_by(init_out, alloc_length);
4985       }
4986     }
4987     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
4988 
4989     // move the allocation here (after the guards)
4990     _gvn.hash_delete(alloc);
4991     alloc->set_req(TypeFunc::Control, control());
4992     alloc->set_req(TypeFunc::I_O, i_o());
4993     Node *mem = reset_memory();
4994     set_all_memory(mem);
4995     alloc->set_req(TypeFunc::Memory, mem);
4996     set_control(init->proj_out_or_null(TypeFunc::Control));
4997     set_i_o(callprojs.fallthrough_ioproj);
4998 
4999     // Update memory as done in GraphKit::set_output_for_allocation()
5000     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5001     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5002     if (ary_type->isa_aryptr() && length_type != NULL) {
5003       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5004     }
5005     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5006     int            elemidx  = C->get_alias_index(telemref);
5007     set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
5008     set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
5009 
5010     Node* allocx = _gvn.transform(alloc);
5011     assert(allocx == alloc, "where has the allocation gone?");
5012     assert(dest->is_CheckCastPP(), "not an allocation result?");
5013 
5014     _gvn.hash_delete(dest);
5015     dest->set_req(0, control());
5016     Node* destx = _gvn.transform(dest);
5017     assert(destx == dest, "where has the allocation result gone?");

5153       // Do we have the exact type of dest?
5154       bool could_have_dest = dest_spec;
5155       ciKlass* src_k = NULL;
5156       ciKlass* dest_k = NULL;
5157       if (!src_spec) {
5158         src_k = src_type->speculative_type_not_null();
5159         if (src_k != NULL && src_k->is_array_klass()) {
5160           could_have_src = true;
5161         }
5162       }
5163       if (!dest_spec) {
5164         dest_k = dest_type->speculative_type_not_null();
5165         if (dest_k != NULL && dest_k->is_array_klass()) {
5166           could_have_dest = true;
5167         }
5168       }
5169       if (could_have_src && could_have_dest) {
5170         // If we can have both exact types, emit the missing guards
5171         if (could_have_src && !src_spec) {
5172           src = maybe_cast_profiled_obj(src, src_k, true);


5173         }
5174         if (could_have_dest && !dest_spec) {
5175           dest = maybe_cast_profiled_obj(dest, dest_k, true);


5176         }
5177       }
5178     }
5179   }
5180 
5181   ciMethod* trap_method = method();
5182   int trap_bci = bci();
5183   if (saved_jvms != NULL) {
5184     trap_method = alloc->jvms()->method();
5185     trap_bci = alloc->jvms()->bci();
5186   }
5187 
5188   bool negative_length_guard_generated = false;
5189 
5190   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5191       can_emit_guards &&
5192       !src->is_top() && !dest->is_top()) {
5193     // validate arguments: enables transformation of the ArrayCopyNode
5194     validated = true;
5195 
5196     RegionNode* slow_region = new RegionNode(1);
5197     record_for_igvn(slow_region);
5198 
5199     // (1) src and dest are arrays.
5200     generate_non_array_guard(load_object_klass(src), slow_region);
5201     generate_non_array_guard(load_object_klass(dest), slow_region);
5202 
5203     // (2) src and dest arrays must have elements of the same BasicType
5204     // done at macro expansion or at Ideal transformation time
5205 
5206     // (4) src_offset must not be negative.
5207     generate_negative_guard(src_offset, slow_region);
5208 
5209     // (5) dest_offset must not be negative.
5210     generate_negative_guard(dest_offset, slow_region);
5211 
5212     // (7) src_offset + length must not exceed length of src.
5213     generate_limit_guard(src_offset, length,
5214                          load_array_length(src),
5215                          slow_region);
5216 
5217     // (8) dest_offset + length must not exceed length of dest.
5218     generate_limit_guard(dest_offset, length,
5219                          load_array_length(dest),
5220                          slow_region);
5221 
5222     // (6) length must not be negative.
5223     // This is also checked in generate_arraycopy() during macro expansion, but
5224     // we also have to check it here for the case where the ArrayCopyNode will
5225     // be eliminated by Escape Analysis.
5226     if (EliminateAllocations) {
5227       generate_negative_guard(length, slow_region);
5228       negative_length_guard_generated = true;
5229     }
5230 
5231     // (9) each element of an oop array must be assignable
5232     Node* dest_klass = load_object_klass(dest);
5233     if (src != dest) {
5234       Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);


5235 
5236       if (not_subtype_ctrl != top()) {
5237         PreserveJVMState pjvms(this);
5238         set_control(not_subtype_ctrl);
5239         uncommon_trap(Deoptimization::Reason_intrinsic,
5240                       Deoptimization::Action_make_not_entrant);
5241         assert(stopped(), "Should be stopped");
5242       }
5243     }

5244     {
5245       PreserveJVMState pjvms(this);
5246       set_control(_gvn.transform(slow_region));
5247       uncommon_trap(Deoptimization::Reason_intrinsic,
5248                     Deoptimization::Action_make_not_entrant);
5249       assert(stopped(), "Should be stopped");
5250     }
5251 
5252     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
5253     const Type *toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
5254     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
5255   }
5256 
5257   arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp, new_idx);
5258 
5259   if (stopped()) {
5260     return true;
5261   }
5262 
5263   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != NULL, negative_length_guard_generated,
5264                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
5265                                           // so the compiler has a chance to eliminate them: during macro expansion,
5266                                           // we have to set their control (CastPP nodes are eliminated).
5267                                           load_object_klass(src), load_object_klass(dest),
5268                                           load_array_length(src), load_array_length(dest));
5269 
5270   ac->set_arraycopy(validated);
5271 
5272   Node* n = _gvn.transform(ac);
5273   if (n == ac) {
5274     ac->connect_outputs(this);

  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "ci/ciFlatArrayKlass.hpp"
  28 #include "ci/ciUtilities.inline.hpp"
  29 #include "classfile/vmIntrinsics.hpp"
  30 #include "compiler/compileBroker.hpp"
  31 #include "compiler/compileLog.hpp"
  32 #include "gc/shared/barrierSet.hpp"
  33 #include "jfr/support/jfrIntrinsics.hpp"
  34 #include "memory/resourceArea.hpp"
  35 #include "oops/klass.inline.hpp"
  36 #include "oops/objArrayKlass.hpp"
  37 #include "opto/addnode.hpp"
  38 #include "opto/arraycopynode.hpp"
  39 #include "opto/c2compiler.hpp"
  40 #include "opto/castnode.hpp"
  41 #include "opto/cfgnode.hpp"
  42 #include "opto/convertnode.hpp"
  43 #include "opto/countbitsnode.hpp"
  44 #include "opto/idealKit.hpp"
  45 #include "opto/library_call.hpp"
  46 #include "opto/mathexactnode.hpp"
  47 #include "opto/mulnode.hpp"

 301   case vmIntrinsics::_indexOfUL:                return inline_string_indexOf(StrIntrinsicNode::UL);
 302   case vmIntrinsics::_indexOfIL:                return inline_string_indexOfI(StrIntrinsicNode::LL);
 303   case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
 304   case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
 305   case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar(StrIntrinsicNode::U);
 306   case vmIntrinsics::_indexOfL_char:            return inline_string_indexOfChar(StrIntrinsicNode::L);
 307 
 308   case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);
 309   case vmIntrinsics::_equalsU:                  return inline_string_equals(StrIntrinsicNode::UU);
 310 
 311   case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
 312   case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
 313   case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
 314   case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);
 315 
 316   case vmIntrinsics::_compressStringC:
 317   case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
 318   case vmIntrinsics::_inflateStringC:
 319   case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
 320 
 321   case vmIntrinsics::_makePrivateBuffer:        return inline_unsafe_make_private_buffer();
 322   case vmIntrinsics::_finishPrivateBuffer:      return inline_unsafe_finish_private_buffer();
 323   case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,   Relaxed, false);
 324   case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN,  Relaxed, false);
 325   case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,     Relaxed, false);
 326   case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,    Relaxed, false);
 327   case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,     Relaxed, false);
 328   case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,      Relaxed, false);
 329   case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,     Relaxed, false);
 330   case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,    Relaxed, false);
 331   case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,   Relaxed, false);
 332   case vmIntrinsics::_getValue:                 return inline_unsafe_access(!is_store, T_PRIMITIVE_OBJECT,Relaxed, false);
 333 
 334   case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,   Relaxed, false);
 335   case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN,  Relaxed, false);
 336   case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,     Relaxed, false);
 337   case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,    Relaxed, false);
 338   case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,     Relaxed, false);
 339   case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,      Relaxed, false);
 340   case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,     Relaxed, false);
 341   case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,    Relaxed, false);
 342   case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,   Relaxed, false);
 343   case vmIntrinsics::_putValue:                 return inline_unsafe_access( is_store, T_PRIMITIVE_OBJECT,Relaxed, false);
 344 
 345   case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,   Volatile, false);
 346   case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN,  Volatile, false);
 347   case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,     Volatile, false);
 348   case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,    Volatile, false);
 349   case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,     Volatile, false);
 350   case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,      Volatile, false);
 351   case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,     Volatile, false);
 352   case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,    Volatile, false);
 353   case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,   Volatile, false);
 354 
 355   case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,   Volatile, false);
 356   case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN,  Volatile, false);
 357   case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,     Volatile, false);
 358   case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,    Volatile, false);
 359   case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,     Volatile, false);
 360   case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,      Volatile, false);
 361   case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,     Volatile, false);
 362   case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,    Volatile, false);
 363   case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,   Volatile, false);

 497   case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
 498   case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
 499   case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
 500   case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
 501   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 502 
 503   case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
 504   case vmIntrinsics::_newArray:                   return inline_unsafe_newArray(false);
 505 
 506   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
 507 
 508   case vmIntrinsics::_isInstance:
 509   case vmIntrinsics::_getModifiers:
 510   case vmIntrinsics::_isInterface:
 511   case vmIntrinsics::_isArray:
 512   case vmIntrinsics::_isPrimitive:
 513   case vmIntrinsics::_isHidden:
 514   case vmIntrinsics::_getSuperclass:
 515   case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());
 516 
 517   case vmIntrinsics::_asPrimaryType:
 518   case vmIntrinsics::_asPrimaryTypeArg:
 519   case vmIntrinsics::_asValueType:
 520   case vmIntrinsics::_asValueTypeArg:           return inline_primitive_Class_conversion(intrinsic_id());
 521 
 522   case vmIntrinsics::_floatToRawIntBits:
 523   case vmIntrinsics::_floatToIntBits:
 524   case vmIntrinsics::_intBitsToFloat:
 525   case vmIntrinsics::_doubleToRawLongBits:
 526   case vmIntrinsics::_doubleToLongBits:
 527   case vmIntrinsics::_longBitsToDouble:         return inline_fp_conversions(intrinsic_id());
 528 
 529   case vmIntrinsics::_floatIsInfinite:
 530   case vmIntrinsics::_doubleIsInfinite:         return inline_fp_range_check(intrinsic_id());
 531 
 532   case vmIntrinsics::_numberOfLeadingZeros_i:
 533   case vmIntrinsics::_numberOfLeadingZeros_l:
 534   case vmIntrinsics::_numberOfTrailingZeros_i:
 535   case vmIntrinsics::_numberOfTrailingZeros_l:
 536   case vmIntrinsics::_bitCount_i:
 537   case vmIntrinsics::_bitCount_l:
 538   case vmIntrinsics::_reverse_i:
 539   case vmIntrinsics::_reverse_l:
 540   case vmIntrinsics::_reverseBytes_i:
 541   case vmIntrinsics::_reverseBytes_l:

2156     case vmIntrinsics::_remainderUnsigned_l: {
2157       zero_check_long(argument(2));
2158       // Compile-time detection of a constant zero divisor (the division always throws)
2159       if (stopped()) {
2160         return true; // keep the graph constructed so far
2161       }
2162       n = new UModLNode(control(), argument(0), argument(2));
2163       break;
2164     }
2165     default:  fatal_unexpected_iid(id);  break;
2166   }
2167   set_result(_gvn.transform(n));
2168   return true;
2169 }
2170 
2171 //----------------------------inline_unsafe_access----------------------------
2172 
2173 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2174   // Attempt to infer a sharper value type from the offset and base type.
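  // Editor's note: e.g. an Unsafe getReference() whose offset resolves to an
  // instance field declared as String can have its loaded value typed as
  // String (still possibly null) instead of plain Object; the same holds for
  // the element type of an object array. The example field type is illustrative.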
2175   ciKlass* sharpened_klass = NULL;
2176   bool null_free = false;
2177 
2178   // See if it is an instance field, with an object type.
2179   if (alias_type->field() != NULL) {
2180     if (alias_type->field()->type()->is_klass()) {
2181       sharpened_klass = alias_type->field()->type()->as_klass();
2182       null_free = alias_type->field()->is_null_free();
2183     }
2184   }
2185 
2186   const TypeOopPtr* result = NULL;
2187   // See if it is a narrow oop array.
2188   if (adr_type->isa_aryptr()) {
2189     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2190       const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2191       null_free = adr_type->is_aryptr()->is_null_free();
2192       if (elem_type != NULL && elem_type->is_loaded()) {
2193         // Sharpen the value type.
2194         result = elem_type;
2195       }
2196     }
2197   }
2198 
2199   // The sharpened class might be unloaded if there is no class loader
2200   // constraint in place.
2201   if (result == NULL && sharpened_klass != NULL && sharpened_klass->is_loaded()) {
2202     // Sharpen the value type.
2203     result = TypeOopPtr::make_from_klass(sharpened_klass);
2204     if (null_free) {
2205       result = result->join_speculative(TypePtr::NOTNULL)->is_oopptr();
2206     }
2207   }
2208   if (result != NULL) {
2209 #ifndef PRODUCT
2210     if (C->print_intrinsics() || C->print_inlining()) {
2211       tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
2212       tty->print("  sharpened value: ");  result->dump();    tty->cr();
2213     }
2214 #endif
2215   }
2216   return result;
2217 }
2218 
2219 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2220   switch (kind) {
2221       case Relaxed:
2222         return MO_UNORDERED;
2223       case Opaque:
2224         return MO_RELAXED;
2225       case Acquire:
2226         return MO_ACQUIRE;

2241   guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2242   assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2243 
2244   if (is_reference_type(type)) {
2245     decorators |= ON_UNKNOWN_OOP_REF;
2246   }
2247 
2248   if (unaligned) {
2249     decorators |= C2_UNALIGNED;
2250   }
2251 
2252 #ifndef PRODUCT
2253   {
2254     ResourceMark rm;
2255     // Check the signatures.
2256     ciSignature* sig = callee()->signature();
2257 #ifdef ASSERT
2258     if (!is_store) {
2259       // Object getReference(Object base, int/long offset), etc.
2260       BasicType rtype = sig->return_type()->basic_type();
2261       assert(rtype == type || (rtype == T_OBJECT && type == T_PRIMITIVE_OBJECT), "getter must return the expected value");
2262       assert(sig->count() == 2 || (type == T_PRIMITIVE_OBJECT && sig->count() == 3), "oop getter has 2 or 3 arguments");
2263       assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2264       assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2265     } else {
2266       // void putReference(Object base, int/long offset, Object x), etc.
2267       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2268       assert(sig->count() == 3 || (type == T_PRIMITIVE_OBJECT && sig->count() == 4), "oop putter has 3 arguments");
2269       assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2270       assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2271       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2272       assert(vtype == type || (type == T_PRIMITIVE_OBJECT && vtype == T_OBJECT), "putter must accept the expected value");
2273     }
2274 #endif // ASSERT
2275  }
2276 #endif //PRODUCT
2277 
2278   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2279 
2280   Node* receiver = argument(0);  // type: oop
2281 
2282   // Build address expression.
2283   Node* heap_base_oop = top();
2284 
2285   // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2286   Node* base = argument(1);  // type: oop
2287   // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2288   Node* offset = argument(2);  // type: long
2289   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2290   // to be plain byte offsets, which are also the same as those accepted
2291   // by oopDesc::field_addr.
2292   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2293          "fieldOffset must be byte-scaled");
2294 
2295   ciInlineKlass* inline_klass = NULL;
2296   if (type == T_PRIMITIVE_OBJECT) {
2297     const TypeInstPtr* cls = _gvn.type(argument(4))->isa_instptr();
2298     if (cls == NULL || cls->const_oop() == NULL) {
2299       return false;
2300     }
2301     ciType* mirror_type = cls->const_oop()->as_instance()->java_mirror_type();
2302     if (!mirror_type->is_inlinetype()) {
2303       return false;
2304     }
2305     inline_klass = mirror_type->as_inline_klass();
2306   }
2307 
2308   if (base->is_InlineTypeBase()) {
2309     InlineTypeBaseNode* vt = base->as_InlineTypeBase();
2310     if (is_store) {
2311       if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->isa_inlinetype() || !_gvn.type(vt)->is_inlinetype()->larval()) {
2312         return false;
2313       }
2314       base = vt->get_oop();
2315     } else {
2316       if (offset->is_Con()) {
2317         long off = find_long_con(offset, 0);
2318         ciInlineKlass* vk = vt->type()->inline_klass();
2319         if ((long)(int)off != off || !vk->contains_field_offset(off)) {
2320           return false;
2321         }
2322 
2323         ciField* field = vk->get_non_flattened_field_by_offset(off);
2324         if (field != NULL) {
2325           BasicType bt = field->layout_type();
2326           if (bt == T_ARRAY || bt == T_NARROWOOP || (bt == T_PRIMITIVE_OBJECT && !field->is_flattened())) {
2327             bt = T_OBJECT;
2328           }
2329           if (bt == type && (bt != T_PRIMITIVE_OBJECT || field->type() == inline_klass)) {
2330             set_result(vt->field_value_by_offset(off, false));
2331             return true;
2332           }
2333         }
2334       }
2335       if (vt->is_InlineType()) {
2336         // Re-execute the unsafe access if allocation triggers deoptimization.
2337         PreserveReexecuteState preexecs(this);
2338         jvms()->set_should_reexecute(true);
2339         vt = vt->buffer(this);
2340       }
2341       base = vt->get_oop();
2342     }
2343   }
2344 
2345   // 32-bit machines ignore the high half!
2346   offset = ConvL2X(offset);
2347 
2348   // Save state and restore on bailout
2349   uint old_sp = sp();
2350   SafePointNode* old_map = clone_map();
2351 
2352   Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2353 
2354   if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
2355     if (type != T_OBJECT && (inline_klass == NULL || !inline_klass->has_object_fields())) {
2356       decorators |= IN_NATIVE; // off-heap primitive access
2357     } else {
2358       set_map(old_map);
2359       set_sp(old_sp);
2360       return false; // off-heap oop accesses are not supported
2361     }
2362   } else {
2363     heap_base_oop = base; // on-heap or mixed access
2364   }
2365 
2366   // Can base be NULL? Otherwise, always on-heap access.
2367   bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2368 
2369   if (!can_access_non_heap) {
2370     decorators |= IN_HEAP;
2371   }
2372 
2373   Node* val = is_store ? argument(4 + (type == T_PRIMITIVE_OBJECT ? 1 : 0)) : NULL;
2374 
2375   const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2376   if (adr_type == TypePtr::NULL_PTR) {
2377     set_map(old_map);
2378     set_sp(old_sp);
2379     return false; // off-heap access with zero address
2380   }
2381 
2382   // Try to categorize the address.
2383   Compile::AliasType* alias_type = C->alias_type(adr_type);
2384   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2385 
2386   if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2387       alias_type->adr_type() == TypeAryPtr::RANGE) {
2388     set_map(old_map);
2389     set_sp(old_sp);
2390     return false; // not supported
2391   }
2392 
2393   bool mismatched = false;
2394   BasicType bt = T_ILLEGAL;
2395   ciField* field = NULL;
2396   if (adr_type->isa_instptr()) {
2397     const TypeInstPtr* instptr = adr_type->is_instptr();
2398     ciInstanceKlass* k = instptr->instance_klass();
2399     int off = instptr->offset();
2400     if (instptr->const_oop() != NULL &&
2401         k == ciEnv::current()->Class_klass() &&
2402         instptr->offset() >= (k->size_helper() * wordSize)) {
2403       k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
2404       field = k->get_field_by_offset(off, true);
2405     } else {
2406       field = k->get_non_flattened_field_by_offset(off);
2407     }
2408     if (field != NULL) {
2409       bt = field->layout_type();
2410     }
2411     assert(bt == alias_type->basic_type() || bt == T_PRIMITIVE_OBJECT, "should match");
2412     if (field != NULL && bt == T_PRIMITIVE_OBJECT && !field->is_flattened()) {
2413       bt = T_OBJECT;
2414     }
2415   } else {
2416     bt = alias_type->basic_type();
2417   }
2418 
2419   if (bt != T_ILLEGAL) {
2420     assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2421     if (bt == T_BYTE && adr_type->isa_aryptr()) {
2422       // Alias type doesn't differentiate between byte[] and boolean[].
2423       // Use address type to get the element type.
2424       bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2425     }
2426     if (bt != T_PRIMITIVE_OBJECT && is_reference_type(bt, true)) {
2427       // accessing an array field with getReference is not a mismatch
2428       bt = T_OBJECT;
2429     }
2430     if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2431       // Don't intrinsify mismatched object accesses
2432       set_map(old_map);
2433       set_sp(old_sp);
2434       return false;
2435     }
2436     mismatched = (bt != type);
2437   } else if (alias_type->adr_type()->isa_oopptr()) {
2438     mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2439   }
2440 
2441   if (type == T_PRIMITIVE_OBJECT) {
2442     if (adr_type->isa_instptr()) {
2443       if (field == NULL || field->type() != inline_klass) {
2444         mismatched = true;
2445       }
2446     } else if (adr_type->isa_aryptr()) {
2447       const Type* elem = adr_type->is_aryptr()->elem();
2448       if (!elem->isa_inlinetype()) {
2449         mismatched = true;
2450       } else if (elem->inline_klass() != inline_klass) {
2451         mismatched = true;
2452       }
2453     } else {
2454       mismatched = true;
2455     }
2456     if (is_store) {
2457       const Type* val_t = _gvn.type(val);
2458       if (!(val_t->isa_inlinetype() || val_t->is_inlinetypeptr()) || val_t->inline_klass() != inline_klass) {
2459         set_map(old_map);
2460         set_sp(old_sp);
2461         return false;
2462       }
2463     }
2464   }
2465 
2466   old_map->destruct(&_gvn);
2467   assert(!mismatched || type == T_PRIMITIVE_OBJECT || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2468 
2469   if (mismatched) {
2470     decorators |= C2_MISMATCHED;
2471   }
2472 
2473   // First guess at the value type.
2474   const Type *value_type = Type::get_const_basic_type(type);
2475 
2476   // Figure out the memory ordering.
2477   decorators |= mo_decorator_for_access_kind(kind);
2478 
2479   if (!is_store) {
2480     if (type == T_OBJECT) {
2481       const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2482       if (tjp != NULL) {
2483         value_type = tjp;
2484       }
2485     } else if (type == T_PRIMITIVE_OBJECT) {
2486       value_type = NULL;
2487     }
2488   }
2489 
2490   receiver = null_check(receiver);
2491   if (stopped()) {
2492     return true;
2493   }
2494   // Heap pointers get a null-check from the interpreter,
2495   // as a courtesy.  However, this is not guaranteed by Unsafe,
2496   // and it is not possible to fully distinguish unintended nulls
2497   // from intended ones in this API.
2498 
2499   if (!is_store) {
2500     Node* p = NULL;
2501     // Try to constant fold a load from a constant field
2502 
2503     if (heap_base_oop != top() && field != NULL && field->is_constant() && !field->is_flattened() && !mismatched) {
2504       // final or stable field
2505       p = make_constant_from_field(field, heap_base_oop);
2506     }
2507 
2508     if (p == NULL) { // Could not constant fold the load
2509       if (type == T_PRIMITIVE_OBJECT) {
2510         if (adr_type->isa_instptr() && !mismatched) {
2511           ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
2512           int offset = adr_type->is_instptr()->offset();
2513           p = InlineTypeNode::make_from_flattened(this, inline_klass, base, base, holder, offset, decorators);
2514         } else {
2515           p = InlineTypeNode::make_from_flattened(this, inline_klass, base, adr, NULL, 0, decorators);
2516         }
2517       } else {
2518         p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2519         const TypeOopPtr* ptr = value_type->make_oopptr();
2520         if (ptr != NULL && ptr->is_inlinetypeptr()) {
2521           // Load a non-flattened inline type from memory
2522           p = InlineTypeNode::make_from_oop(this, p, ptr->inline_klass(), !ptr->maybe_null());
2523         }
2524       }
2525       // Normalize the value returned by getBoolean in the following cases
2526       if (type == T_BOOLEAN &&
2527           (mismatched ||
2528            heap_base_oop == top() ||                  // - heap_base_oop is NULL or
2529            (can_access_non_heap && field == NULL))    // - heap_base_oop is potentially NULL
2530                                                       //   and the unsafe access is made to large offset
2531                                                       //   and the unsafe access is made at a large offset
2532                                                       //   field access)
2533             ) {
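          // Editor's note: the IdealKit block below simply emits the
          // equivalent of "p = (p != 0) ? 1 : 0", canonicalizing whatever
          // byte was actually read into a proper 0/1 boolean value.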
2534           IdealKit ideal = IdealKit(this);
2535 #define __ ideal.
2536           IdealVariable normalized_result(ideal);
2537           __ declarations_done();
2538           __ set(normalized_result, p);
2539           __ if_then(p, BoolTest::ne, ideal.ConI(0));
2540           __ set(normalized_result, ideal.ConI(1));
2541           ideal.end_if();
2542           final_sync(ideal);
2543           p = __ value(normalized_result);
2544 #undef __
2545       }
2546     }
2547     if (type == T_ADDRESS) {
2548       p = gvn().transform(new CastP2XNode(NULL, p));
2549       p = ConvX2UL(p);
2550     }
2551     // The load node has the control of the preceding MemBarCPUOrder.  All
2552     // following nodes will have the control of the MemBarCPUOrder inserted at
2553     // the end of this method.  So, pushing the load onto the stack at a later
2554     // point is fine.
2555     set_result(p);
2556   } else {
2557     if (bt == T_ADDRESS) {
2558       // Repackage the long as a pointer.
2559       val = ConvL2X(val);
2560       val = gvn().transform(new CastX2PNode(val));
2561     }
2562     if (type == T_PRIMITIVE_OBJECT) {
2563       if (adr_type->isa_instptr() && !mismatched) {
2564         ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
2565         int offset = adr_type->is_instptr()->offset();
2566         val->as_InlineTypeBase()->store_flattened(this, base, base, holder, offset, decorators);
2567       } else {
2568         val->as_InlineTypeBase()->store_flattened(this, base, adr, NULL, 0, decorators);
2569       }
2570     } else {
2571       access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2572     }
2573   }
2574 
2575   if (argument(1)->is_InlineType() && is_store) {
2576     InlineTypeBaseNode* value = InlineTypeNode::make_from_oop(this, base, _gvn.type(base)->inline_klass());
2577     value = value->make_larval(this, false);
2578     replace_in_map(argument(1), value);
2579   }
2580 
2581   return true;
2582 }
2583 
2584 bool LibraryCallKit::inline_unsafe_make_private_buffer() {
2585   Node* receiver = argument(0);
2586   Node* value = argument(1);
2587   if (!value->is_InlineTypeBase()) {
2588     return false;
2589   }
2590 
2591   receiver = null_check(receiver);
2592   if (stopped()) {
2593     return true;
2594   }
2595 
2596   set_result(value->as_InlineTypeBase()->make_larval(this, true));
2597   return true;
2598 }
2599 
2600 bool LibraryCallKit::inline_unsafe_finish_private_buffer() {
2601   Node* receiver = argument(0);
2602   Node* buffer = argument(1);
2603   if (!buffer->is_InlineType()) {
2604     return false;
2605   }
2606   InlineTypeNode* vt = buffer->as_InlineType();
2607   if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->is_inlinetype()->larval()) {
2608     return false;
2609   }
2610 
2611   receiver = null_check(receiver);
2612   if (stopped()) {
2613     return true;
2614   }
2615 
2616   set_result(vt->finish_larval(this));
2617   return true;
2618 }
2619 
2620 //----------------------------inline_unsafe_load_store----------------------------
2621 // This method serves a couple of different customers (depending on LoadStoreKind):
2622 //
2623 // LS_cmp_swap:
2624 //
2625 //   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2626 //   boolean compareAndSetInt(   Object o, long offset, int    expected, int    x);
2627 //   boolean compareAndSetLong(  Object o, long offset, long   expected, long   x);
2628 //
2629 // LS_cmp_swap_weak:
2630 //
2631 //   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
2632 //   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
2633 //   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2634 //   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2635 //
2636 //   boolean weakCompareAndSetInt(          Object o, long offset, int    expected, int    x);
2637 //   boolean weakCompareAndSetIntPlain(     Object o, long offset, int    expected, int    x);
2638 //   boolean weakCompareAndSetIntAcquire(   Object o, long offset, int    expected, int    x);
2639 //   boolean weakCompareAndSetIntRelease(   Object o, long offset, int    expected, int    x);

2805     }
2806     case LS_cmp_swap:
2807     case LS_cmp_swap_weak:
2808     case LS_get_add:
2809       break;
2810     default:
2811       ShouldNotReachHere();
2812   }
2813 
2814   // Null check receiver.
2815   receiver = null_check(receiver);
2816   if (stopped()) {
2817     return true;
2818   }
2819 
2820   int alias_idx = C->get_alias_index(adr_type);
2821 
2822   if (is_reference_type(type)) {
2823     decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2824 
2825     if (oldval != NULL && oldval->is_InlineType()) {
2826       // Re-execute the unsafe access if allocation triggers deoptimization.
2827       PreserveReexecuteState preexecs(this);
2828       jvms()->set_should_reexecute(true);
2829       oldval = oldval->as_InlineType()->buffer(this)->get_oop();
2830     }
2831     if (newval != NULL && newval->is_InlineType()) {
2832       // Re-execute the unsafe access if allocation triggers deoptimization.
2833       PreserveReexecuteState preexecs(this);
2834       jvms()->set_should_reexecute(true);
2835       newval = newval->as_InlineType()->buffer(this)->get_oop();
2836     }
2837 
2838     // Transformation of a value which could be NULL pointer (CastPP #NULL)
2839     // could be delayed during Parse (for example, in adjust_map_after_if()).
2840     // Execute transformation here to avoid barrier generation in such case.
2841     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2842       newval = _gvn.makecon(TypePtr::NULL_PTR);
2843 
2844     if (oldval != NULL && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2845       // Refine the value to a null constant, when it is known to be null
2846       oldval = _gvn.makecon(TypePtr::NULL_PTR);
2847     }
2848   }
2849 
2850   Node* result = NULL;
2851   switch (kind) {
2852     case LS_cmp_exchange: {
2853       result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2854                                             oldval, newval, value_type, type, decorators);
2855       break;
2856     }
2857     case LS_cmp_swap_weak:

2979   Node* cls = null_check(argument(1));
2980   if (stopped())  return true;
2981 
2982   Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
2983   kls = null_check(kls);
2984   if (stopped())  return true;  // argument was like int.class
2985 
2986   Node* test = NULL;
2987   if (LibraryCallKit::klass_needs_init_guard(kls)) {
2988     // Note:  The argument might still be an illegal value like
2989     // Serializable.class or Object[].class.   The runtime will handle it.
2990     // But we must make an explicit check for initialization.
2991     Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
2992     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
2993     // can generate code to load it as unsigned byte.
2994     Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
2995     Node* bits = intcon(InstanceKlass::fully_initialized);
2996     test = _gvn.transform(new SubINode(inst, bits));
2997     // The 'test' is non-zero if we need to take a slow path.
2998   }
2999   Node* obj = NULL;
3000   const TypeInstKlassPtr* tkls = _gvn.type(kls)->isa_instklassptr();
3001   if (tkls != NULL && tkls->instance_klass()->is_inlinetype()) {
3002     obj = InlineTypeNode::make_default(_gvn, tkls->instance_klass()->as_inline_klass())->buffer(this);
3003   } else {
3004     obj = new_instance(kls, test);
3005   }
3006   set_result(obj);
3007   return true;
3008 }
3009 
3010 //------------------------inline_native_time_funcs--------------
3011 // inline code for System.currentTimeMillis() and System.nanoTime()
3012 // these have the same type and signature
3013 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3014   const TypeFunc* tf = OptoRuntime::void_long_Type();
3015   const TypePtr* no_memory_effects = NULL;
3016   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3017   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
3018 #ifdef ASSERT
3019   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
3020   assert(value_top == top(), "second value must be top");
3021 #endif
3022   set_result(value);
3023   return true;
3024 }
3025 

3561   ciKlass *objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3562   const TypeOopPtr *etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3563 
3564   bool xk = etype->klass_is_exact();
3565 
3566   Node* thread = _gvn.transform(new ThreadLocalNode());
3567   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::extentLocalCache_offset()));
3568   return _gvn.transform(LoadNode::make(_gvn, NULL, immutable_memory(), p, p->bottom_type()->is_ptr(),
3569         TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3570 }
3571 
3572 //------------------------inline_native_extentLocalCache------------------
3573 bool LibraryCallKit::inline_native_extentLocalCache() {
3574   ciKlass *objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3575   const TypeOopPtr *etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3576   const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
3577 
3578   // Because we create the extentLocal cache lazily we have to make the
3579   // type of the result BotPTR.
3580   bool xk = etype->klass_is_exact();
3581   const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, TypeAryPtr::Offset(0));
3582   Node* cache_obj_handle = extentLocalCache_helper();
3583   set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3584 
3585   return true;
3586 }
3587 
3588 //------------------------inline_native_setExtentLocalCache------------------
3589 bool LibraryCallKit::inline_native_setExtentLocalCache() {
3590   Node* arr = argument(0);
3591   Node* cache_obj_handle = extentLocalCache_helper();
3592 
3593   const TypePtr *adr_type = _gvn.type(cache_obj_handle)->isa_ptr();
3594   store_to_memory(control(), cache_obj_handle, arr, T_OBJECT, adr_type,
3595                   MemNode::unordered);
3596 
3597   return true;
3598 }
3599 
3600 //-----------------------load_klass_from_mirror_common-------------------------
3601 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3602 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3603 // and branch to the given path on the region.
3604 // If never_see_null, take an uncommon trap on null, so we can optimistically
3605 // compile for the non-null case.
3606 // If the region is NULL, force never_see_null = true.
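// Editor's note: typical call sites elsewhere in this file, for illustration:
//
//   Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
//     // no region: a primitive mirror (null klass) takes an uncommon trap
//   Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
//     // with a region: a primitive mirror branches to region->in(_prim_path)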
3607 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3608                                                     bool never_see_null,
3609                                                     RegionNode* region,
3610                                                     int null_path,
3611                                                     int offset) {
3612   if (region == NULL)  never_see_null = true;
3613   Node* p = basic_plus_adr(mirror, offset);
3614   const TypeKlassPtr*  kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3615   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3616   Node* null_ctl = top();
3617   kls = null_check_oop(kls, &null_ctl, never_see_null);
3618   if (region != NULL) {
3619     // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
3620     region->init_req(null_path, null_ctl);
3621   } else {
3622     assert(null_ctl == top(), "no loose ends");
3623   }
3624   return kls;
3625 }
3626 
3627 //--------------------(inline_native_Class_query helpers)---------------------
3628 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
3629 // Fall through if (mods & mask) == bits, take the guard otherwise.
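// Editor's note: for example, inline_native_clone() above calls this with
//   mask = JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER and
//   bits = JVM_ACC_IS_CLONEABLE_FAST,
// so only klasses that are fast-cloneable and have no finalizer fall through;
// every other combination branches to the supplied slow_region.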
3630 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3631   // Branch around if the given klass has the given modifier bit set.
3632   // Like generate_guard, adds a new path onto the region.
3633   Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3634   Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered);
3635   Node* mask = intcon(modifier_mask);
3636   Node* bits = intcon(modifier_bits);
3637   Node* mbit = _gvn.transform(new AndINode(mods, mask));
3638   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
3639   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3640   return generate_fair_guard(bol, region);
3641 }
3642 
3643 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3644   return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3645 }
3646 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3647   return generate_access_flags_guard(kls, JVM_ACC_IS_HIDDEN_CLASS, 0, region);
3648 }
3649 
3650 //-------------------------inline_native_Class_query-------------------
3651 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3652   const Type* return_type = TypeInt::BOOL;
3653   Node* prim_return_value = top();  // what happens if it's a primitive class?
3654   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3655   bool expect_prim = false;     // most of these guys expect to work on refs
3656 
3657   enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3658 
3659   Node* mirror = argument(0);
3660   Node* obj    = top();
3661 
3662   switch (id) {

3816 
3817   case vmIntrinsics::_getClassAccessFlags:
3818     p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3819     query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
3820     break;
3821 
3822   default:
3823     fatal_unexpected_iid(id);
3824     break;
3825   }
3826 
3827   // Fall-through is the normal case of a query to a real class.
3828   phi->init_req(1, query_value);
3829   region->init_req(1, control());
3830 
3831   C->set_has_split_ifs(true); // Has chance for split-if optimization
3832   set_result(region, phi);
3833   return true;
3834 }
3835 
3836 //-------------------------inline_primitive_Class_conversion-------------------
3837 //               Class<T> java.lang.Class                  .asPrimaryType()
3838 // public static Class<T> jdk.internal.value.PrimitiveClass.asPrimaryType(Class<T>)
3839 //               Class<T> java.lang.Class                  .asValueType()
3840 // public static Class<T> jdk.internal.value.PrimitiveClass.asValueType(Class<T>)
3841 bool LibraryCallKit::inline_primitive_Class_conversion(vmIntrinsics::ID id) {
3842   Node* mirror = argument(0); // Receiver/argument Class
3843   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3844   if (mirror_con == NULL) {
3845     return false;
3846   }
3847 
3848   bool is_val_mirror = true;
3849   ciType* tm = mirror_con->java_mirror_type(&is_val_mirror);
3850   if (tm != NULL) {
3851     Node* result = mirror;
3852     if ((id == vmIntrinsics::_asPrimaryType || id == vmIntrinsics::_asPrimaryTypeArg) && is_val_mirror) {
3853       result = _gvn.makecon(TypeInstPtr::make(tm->as_inline_klass()->ref_mirror()));
3854     } else if (id == vmIntrinsics::_asValueType || id == vmIntrinsics::_asValueTypeArg) {
3855       if (!tm->is_inlinetype()) {
3856         return false; // Throw UnsupportedOperationException
3857       } else if (!is_val_mirror) {
3858         result = _gvn.makecon(TypeInstPtr::make(tm->as_inline_klass()->val_mirror()));
3859       }
3860     }
3861     set_result(result);
3862     return true;
3863   }
3864   return false;
3865 }
3866 
3867 //-------------------------inline_Class_cast-------------------
3868 bool LibraryCallKit::inline_Class_cast() {
3869   Node* mirror = argument(0); // Class
3870   Node* obj    = argument(1);
3871   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3872   if (mirror_con == NULL) {
3873     return false;  // dead path (mirror->is_top()).
3874   }
3875   if (obj == NULL || obj->is_top()) {
3876     return false;  // dead path
3877   }
3878   const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
3879 
3880   // First, see if Class.cast() can be folded statically.
3881   // java_mirror_type() returns non-null for compile-time Class constants.
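  // For illustration (sketch): a constant mirror such as String.class.cast(s) folds to 's'
  // when the static subtype check is always-true; if it is always-false we bail out so the
  // normal bytecode can throw ClassCastException; otherwise the dynamic checks below are used.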
3882   bool requires_null_check = false;
3883   ciType* tm = mirror_con->java_mirror_type(&requires_null_check);
3884   if (tm != NULL && tm->is_klass() &&
3885       tp != NULL) {
3886     if (!tp->is_loaded()) {
3887       // Don't use intrinsic when class is not loaded.
3888       return false;
3889     } else {
3890       int static_res = C->static_subtype_check(TypeKlassPtr::make(tm->as_klass()), tp->as_klass_type());
3891       if (static_res == Compile::SSC_always_true) {
3892         // isInstance() is true - fold the code.
3893         if (requires_null_check) {
3894           obj = null_check(obj);
3895         }
3896         set_result(obj);
3897         return true;
3898       } else if (static_res == Compile::SSC_always_false) {
3899         // Don't use intrinsic, have to throw ClassCastException.
3900         // If the reference is null, the non-intrinsic bytecode will
3901         // be optimized appropriately.
3902         return false;
3903       }
3904     }
3905   }
3906 
3907   // Bailout intrinsic and do normal inlining if exception path is frequent.
3908   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
3909     return false;
3910   }
3911 
3912   // Generate dynamic checks.
3913   // Class.cast() is the Java implementation of the _checkcast bytecode.
3914   // Do checkcast (Parse::do_checkcast()) optimizations here.
3915 
3916   if (requires_null_check) {
3917     obj = null_check(obj);
3918   }
3919   mirror = null_check(mirror);
3920   // If mirror is dead, only null-path is taken.
3921   if (stopped()) {
3922     return true;
3923   }
3924 
3925   // Not-subtype or the mirror's klass ptr is NULL (in case it is a primitive).
3926   enum { _bad_type_path = 1, _prim_path = 2, _npe_path = 3, PATH_LIMIT };
3927   RegionNode* region = new RegionNode(PATH_LIMIT);
3928   record_for_igvn(region);
3929 
3930   // Now load the mirror's klass metaobject, and null-check it.
3931   // If kls is null, we have a primitive mirror and
3932   // nothing is an instance of a primitive type.
3933   Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
3934 
3935   Node* res = top();
3936   if (!stopped()) {
3937     if (EnableValhalla && !requires_null_check) {
3938       // Check if we are casting to QMyValue
3939       Node* ctrl_val_mirror = generate_fair_guard(is_val_mirror(mirror), NULL);
3940       if (ctrl_val_mirror != NULL) {
3941         RegionNode* r = new RegionNode(3);
3942         record_for_igvn(r);
3943         r->init_req(1, control());
3944 
3945         // Casting to QMyValue, check for null
3946         set_control(ctrl_val_mirror);
3947         { // PreserveJVMState because null check replaces obj in map
3948           PreserveJVMState pjvms(this);
3949           Node* null_ctr = top();
3950           null_check_oop(obj, &null_ctr);
3951           region->init_req(_npe_path, null_ctr);
3952           r->init_req(2, control());
3953         }
3954         set_control(_gvn.transform(r));
3955       }
3956     }
3957 
3958     Node* bad_type_ctrl = top();
3959     // Do checkcast optimizations.
3960     res = gen_checkcast(obj, kls, &bad_type_ctrl);
3961     region->init_req(_bad_type_path, bad_type_ctrl);
3962   }
3963   if (region->in(_prim_path) != top() ||
3964       region->in(_bad_type_path) != top() ||
3965       region->in(_npe_path) != top()) {
3966     // Let Interpreter throw ClassCastException.
3967     PreserveJVMState pjvms(this);
3968     set_control(_gvn.transform(region));
3969     uncommon_trap(Deoptimization::Reason_intrinsic,
3970                   Deoptimization::Action_maybe_recompile);
3971   }
3972   if (!stopped()) {
3973     set_result(res);
3974   }
3975   return true;
3976 }
3977 
3978 
3979 //--------------------------inline_native_subtype_check------------------------
3980 // This intrinsic takes the JNI calls out of the heart of
3981 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
3982 bool LibraryCallKit::inline_native_subtype_check() {
3983   // Pull both arguments off the stack.
3984   Node* args[2];                // two java.lang.Class mirrors: superc, subc
3985   args[0] = argument(0);
3986   args[1] = argument(1);
3987   Node* klasses[2];             // corresponding Klasses: superk, subk
3988   klasses[0] = klasses[1] = top();
3989 
3990   enum {
3991     // A full decision tree on {superc is prim, subc is prim}:
3992     _prim_0_path = 1,           // {P,N} => false
3993                                 // {P,P} & superc!=subc => false
3994     _prim_same_path,            // {P,P} & superc==subc => true
3995     _prim_1_path,               // {N,P} => false
3996     _ref_subtype_path,          // {N,N} & subtype check wins => true
3997     _both_ref_path,             // {N,N} & subtype check loses => false
3998     PATH_LIMIT
3999   };
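  // Java-level examples of the decision tree above (sketch, for illustration only):
  //   int.class.isAssignableFrom(long.class)        -> false  (_prim_0_path)
  //   int.class.isAssignableFrom(int.class)         -> true   (_prim_same_path)
  //   Number.class.isAssignableFrom(int.class)      -> false  (_prim_1_path)
  //   Number.class.isAssignableFrom(Integer.class)  -> true   (_ref_subtype_path)
  //   String.class.isAssignableFrom(Integer.class)  -> false  (_both_ref_path)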
4000 
4001   RegionNode* region = new RegionNode(PATH_LIMIT);
4002   RegionNode* prim_region = new RegionNode(2);
4003   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
4004   record_for_igvn(region);
4005   record_for_igvn(prim_region);
4006 
4007   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
4008   const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4009   int class_klass_offset = java_lang_Class::klass_offset();
4010 
4011   // First null-check both mirrors and load each mirror's klass metaobject.
4012   int which_arg;
4013   for (which_arg = 0; which_arg <= 1; which_arg++) {
4014     Node* arg = args[which_arg];
4015     arg = null_check(arg);
4016     if (stopped())  break;
4017     args[which_arg] = arg;
4018 
4019     Node* p = basic_plus_adr(arg, class_klass_offset);
4020     Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
4021     klasses[which_arg] = _gvn.transform(kls);
4022   }
4023 
4024   // Having loaded both klasses, test each for null.
4025   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4026   for (which_arg = 0; which_arg <= 1; which_arg++) {
4027     Node* kls = klasses[which_arg];
4028     Node* null_ctl = top();
4029     kls = null_check_oop(kls, &null_ctl, never_see_null);
4030     if (which_arg == 0) {
4031       prim_region->init_req(1, null_ctl);
4032     } else {
4033       region->init_req(_prim_1_path, null_ctl);
4034     }
4035     if (stopped())  break;
4036     klasses[which_arg] = kls;
4037   }
4038 
4039   if (!stopped()) {
4040     // now we have two reference types, in klasses[0..1]
4041     Node* subk   = klasses[1];  // the argument to isAssignableFrom
4042     Node* superk = klasses[0];  // the receiver
4043     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
4044     // If superc is an inline type mirror, we also need to check whether superc == subc because LMyValue
4045     // is not a subtype of QMyValue, yet the subtype check above passes since subk == superk.
4046     generate_fair_guard(is_val_mirror(args[0]), prim_region);
4047     // now we have a successful reference subtype check
4048     region->set_req(_ref_subtype_path, control());
4049   }
4050 
4051   // If both operands are primitive (both klasses null), then
4052   // we must return true when they are identical primitives.
4053   // It is convenient to test this after the first null klass check.
4054   // This path is also used if superc is a value mirror.
4055   set_control(_gvn.transform(prim_region));
4056   if (!stopped()) {
4057     // Since superc is primitive, make a guard for the superc==subc case.
4058     Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4059     Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4060     generate_fair_guard(bol_eq, region);
4061     if (region->req() == PATH_LIMIT+1) {
4062       // A guard was added.  If the added guard is taken, superc==subc.
4063       region->swap_edges(PATH_LIMIT, _prim_same_path);
4064       region->del_req(PATH_LIMIT);
4065     }
4066     region->set_req(_prim_0_path, control()); // Not equal after all.
4067   }
4068 
4069   // these are the only paths that produce 'true':
4070   phi->set_req(_prim_same_path,   intcon(1));
4071   phi->set_req(_ref_subtype_path, intcon(1));
4072 
4073   // pull together the cases:
4074   assert(region->req() == PATH_LIMIT, "sane region");
4075   for (uint i = 1; i < region->req(); i++) {
4076     Node* ctl = region->in(i);
4077     if (ctl == NULL || ctl == top()) {
4078       region->set_req(i, top());
4079       phi   ->set_req(i, top());
4080     } else if (phi->in(i) == NULL) {
4081       phi->set_req(i, intcon(0)); // all other paths produce 'false'
4082     }
4083   }
4084 
4085   set_control(_gvn.transform(region));
4086   set_result(_gvn.transform(phi));
4087   return true;
4088 }
4089 
4090 //---------------------generate_array_guard_common------------------------
4091 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind) {

4092 
4093   if (stopped()) {
4094     return NULL;
4095   }
4096 









4097   // Like generate_guard, adds a new path onto the region.
4098   jint  layout_con = 0;
4099   Node* layout_val = get_layout_helper(kls, layout_con);
4100   if (layout_val == NULL) {
4101     bool query = false;
4102     switch(kind) {
4103       case ObjectArray:    query = Klass::layout_helper_is_objArray(layout_con); break;
4104       case NonObjectArray: query = !Klass::layout_helper_is_objArray(layout_con); break;
4105       case TypeArray:      query = Klass::layout_helper_is_typeArray(layout_con); break;
4106       case AnyArray:       query = Klass::layout_helper_is_array(layout_con); break;
4107       case NonArray:       query = !Klass::layout_helper_is_array(layout_con); break;
4108       default:
4109         ShouldNotReachHere();
4110     }
4111     if (!query) {
4112       return NULL;                       // never a branch
4113     } else {                             // always a branch
4114       Node* always_branch = control();
4115       if (region != NULL)
4116         region->add_req(always_branch);
4117       set_control(top());
4118       return always_branch;
4119     }
4120   }
4121   unsigned int value = 0;
4122   BoolTest::mask btest = BoolTest::illegal;
4123   switch(kind) {
4124     case ObjectArray:
4125     case NonObjectArray: {
4126       value = Klass::_lh_array_tag_obj_value;
4127       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4128       btest = (kind == ObjectArray) ? BoolTest::eq : BoolTest::ne;
4129       break;
4130     }
4131     case TypeArray: {
4132       value = Klass::_lh_array_tag_type_value;
4133       layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4134       btest = BoolTest::eq;
4135       break;
4136     }
4137     case AnyArray:    value = Klass::_lh_neutral_value; btest = BoolTest::lt; break;
4138     case NonArray:    value = Klass::_lh_neutral_value; btest = BoolTest::gt; break;
4139     default:
4140       ShouldNotReachHere();
4141   }
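  // Layout-helper encoding assumed above (see klass.hpp): array klasses have a negative
  // layout helper, so the AnyArray/NonArray cases compare layout_val against
  // _lh_neutral_value, while the ObjectArray/TypeArray cases shift the array tag out of
  // the high bits and compare it against _lh_array_tag_obj_value / _lh_array_tag_type_value.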
4142   // Now test the correct condition.
4143   jint nval = (jint)value;



4144   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));



4145   Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4146   return generate_fair_guard(bol, region);
4147 }
4148 
4149 
4150 //-----------------------inline_native_newArray--------------------------
4151 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
4152 // private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4153 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4154   Node* mirror;
4155   Node* count_val;
4156   if (uninitialized) {
4157     mirror    = argument(1);
4158     count_val = argument(2);
4159   } else {
4160     mirror    = argument(0);
4161     count_val = argument(1);
4162   }
4163 
4164   mirror = null_check(mirror);
4165   // If mirror or obj is dead, only null-path is taken.
4166   if (stopped())  return true;
4167 
4168   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4169   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4170   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4171   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);

4276   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4277   { PreserveReexecuteState preexecs(this);
4278     jvms()->set_should_reexecute(true);
4279 
4280     array_type_mirror = null_check(array_type_mirror);
4281     original          = null_check(original);
4282 
4283     // Check if a null path was taken unconditionally.
4284     if (stopped())  return true;
4285 
4286     Node* orig_length = load_array_length(original);
4287 
4288     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
4289     klass_node = null_check(klass_node);
4290 
4291     RegionNode* bailout = new RegionNode(1);
4292     record_for_igvn(bailout);
4293 
4294     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4295     // Bail out if that is so.
4296     // An inline type array may have an object field that would require a
4297     // write barrier. Conservatively, go to the slow path.
4298     // TODO 8251971: Optimize for the case when flat src/dst are later found
4299     // to not contain oops (i.e., move this check to the macro expansion phase).
4300     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4301     const TypeAryPtr* orig_t = _gvn.type(original)->isa_aryptr();
4302     const TypeKlassPtr* tklass = _gvn.type(klass_node)->is_klassptr();
4303     bool exclude_flat = UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, false, false, BarrierSetC2::Parsing) &&
4304                         // Can src array be flat and contain oops?
4305                         (orig_t == NULL || (!orig_t->is_not_flat() && (!orig_t->is_flat() || orig_t->elem()->inline_klass()->contains_oops()))) &&
4306                         // Can dest array be flat and contain oops?
4307                         tklass->can_be_inline_array() && (!tklass->is_flat() || tklass->is_aryklassptr()->elem()->is_instklassptr()->instance_klass()->as_inline_klass()->contains_oops());
4308     Node* not_objArray = exclude_flat ? generate_non_objArray_guard(klass_node, bailout) : generate_typeArray_guard(klass_node, bailout);
4309     if (not_objArray != NULL) {
4310       // Improve the klass node's type from the new optimistic assumption:
4311       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4312       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0));
4313       Node* cast = new CastPPNode(klass_node, akls);
4314       cast->init_req(0, control());
4315       klass_node = _gvn.transform(cast);
4316     }
4317 
4318     // Bail out if either start or end is negative.
4319     generate_negative_guard(start, bailout, &start);
4320     generate_negative_guard(end,   bailout, &end);
4321 
4322     Node* length = end;
4323     if (_gvn.type(start) != TypeInt::ZERO) {
4324       length = _gvn.transform(new SubINode(end, start));
4325     }
4326 
4327     // Bail out if length is negative.
4328     // Without this, new_array would throw a
4329     // NegativeArraySizeException, but an IllegalArgumentException is what
4330     // should be thrown.
4331     generate_negative_guard(length, bailout, &length);
4332 
4333     // Handle inline type arrays
4334     bool can_validate = !too_many_traps(Deoptimization::Reason_class_check);
4335     if (!stopped()) {
4336       orig_t = _gvn.type(original)->isa_aryptr();
4337       if (orig_t != NULL && orig_t->is_flat()) {
4338         // Src is flat, check that dest is flat as well
4339         if (exclude_flat) {
4340           // Dest can't be flat, bail out
4341           bailout->add_req(control());
4342           set_control(top());
4343         } else {
4344           generate_fair_guard(flat_array_test(klass_node, /* flat = */ false), bailout);
4345         }
4346       } else if (UseFlatArray && (orig_t == NULL || !orig_t->is_not_flat()) &&
4347                  // If dest is flat, src must be flat as well (guaranteed by src <: dest check if validated).
4348                  ((!tklass->is_flat() && tklass->can_be_inline_array()) || !can_validate)) {
4349         // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
4350         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
4351         generate_fair_guard(flat_array_test(load_object_klass(original)), bailout);
4352         if (orig_t != NULL) {
4353           orig_t = orig_t->cast_to_not_flat();
4354           original = _gvn.transform(new CheckCastPPNode(control(), original, orig_t));
4355         }
4356       }
4357       if (!can_validate) {
4358         // No validation. The subtype check emitted at macro expansion time will not go to the slow
4359         // path but will call checkcast_arraycopy, which cannot handle flat/null-free inline type arrays.
4360         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat/null-free.
4361         generate_fair_guard(null_free_array_test(klass_node), bailout);
4362       }
4363     }
4364 
4365     if (bailout->req() > 1) {
4366       PreserveJVMState pjvms(this);
4367       set_control(_gvn.transform(bailout));
4368       uncommon_trap(Deoptimization::Reason_intrinsic,
4369                     Deoptimization::Action_maybe_recompile);
4370     }
4371 
4372     if (!stopped()) {
4373       // How many elements will we copy from the original?
4374       // The answer is MinI(orig_length - start, length).
4375       Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4376       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
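      // Worked example (sketch): Arrays.copyOfRange(a, 2, 10) on an 8-element array gives
      // length = 10 - 2 = 8 and orig_tail = 8 - 2 = 6, so moved = min(6, 8) = 6 elements are
      // copied and the remaining slots of the new array keep their default value.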
4377 
4378       // Generate a direct call to the right arraycopy function(s).
4379       // We know the copy is disjoint but we might not know if the
4380       // oop stores need checking.
4381       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
4382       // This will fail a store-check if x contains any non-nulls.
4383 
4384       // ArrayCopyNode::Ideal may transform the ArrayCopyNode to

4387       // to the copyOf to be validated, including that the copy to the
4388       // new array won't trigger an ArrayStoreException. That subtype
4389       // check can be optimized if we know something on the type of
4390       // the input array from type speculation.
4391       if (_gvn.type(klass_node)->singleton()) {
4392         const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4393         const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4394 
4395         int test = C->static_subtype_check(superk, subk);
4396         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4397           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4398           if (t_original->speculative_type() != NULL) {
4399             original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4400           }
4401         }
4402       }
4403 
4404       bool validated = false;
4405       // Reason_class_check rather than Reason_intrinsic because we
4406       // want to intrinsify even if this traps.
4407       if (can_validate) {
4408         Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4409 
4410         if (not_subtype_ctrl != top()) {
4411           PreserveJVMState pjvms(this);
4412           set_control(not_subtype_ctrl);
4413           uncommon_trap(Deoptimization::Reason_class_check,
4414                         Deoptimization::Action_make_not_entrant);
4415           assert(stopped(), "Should be stopped");
4416         }
4417         validated = true;
4418       }
4419 
4420       if (!stopped()) {
4421         newcopy = new_array(klass_node, length, 0);  // no arguments to push
4422 
4423         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
4424                                                 load_object_klass(original), klass_node);
4425         if (!is_copyOfRange) {
4426           ac->set_copyof(validated);
4427         } else {

4526   set_edges_for_java_call(slow_call);
4527   return slow_call;
4528 }
4529 
4530 
4531 /**
4532  * Build special case code for calls to hashCode on an object. This call may
4533  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4534  * slightly different code.
4535  */
4536 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4537   assert(is_static == callee()->is_static(), "correct intrinsic selection");
4538   assert(!(is_virtual && is_static), "either virtual, special, or static");
4539 
4540   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4541 
4542   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4543   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
4544   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
4545   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4546   Node* obj = argument(0);
4547 
4548   if (obj->is_InlineType() || gvn().type(obj)->is_inlinetypeptr()) {
4549     return false;
4550   }
4551 
4552   if (!is_static) {
4553     // Check for hashing null object
4554     obj = null_check_receiver();
4555     if (stopped())  return true;        // unconditionally null
4556     result_reg->init_req(_null_path, top());
4557     result_val->init_req(_null_path, top());
4558   } else {
4559     // Do a null check, and return zero if null.
4560     // System.identityHashCode(null) == 0

4561     Node* null_ctl = top();
4562     obj = null_check_oop(obj, &null_ctl);
4563     result_reg->init_req(_null_path, null_ctl);
4564     result_val->init_req(_null_path, _gvn.intcon(0));
4565   }
4566 
4567   // Unconditionally null?  Then return right away.
4568   if (stopped()) {
4569     set_control( result_reg->in(_null_path));
4570     if (!stopped())
4571       set_result(result_val->in(_null_path));
4572     return true;
4573   }
4574 
4575   // We only go to the fast case code if we pass a number of guards.  The
4576   // paths which do not pass are accumulated in the slow_region.
4577   RegionNode* slow_region = new RegionNode(1);
4578   record_for_igvn(slow_region);
4579 
4580   // If this is a virtual call, we generate a funny guard.  We pull out
4581   // the vtable entry corresponding to hashCode() from the target object.
4582   // If the target method which we are calling happens to be the native
4583   // Object hashCode() method, we pass the guard.  We do not need this
4584   // guard for non-virtual calls -- the caller is known to be the native
4585   // Object hashCode().
4586   if (is_virtual) {
4587     // After null check, get the object's klass.
4588     Node* obj_klass = load_object_klass(obj);
4589     generate_virtual_guard(obj_klass, slow_region);
4590   }
4591 
4592   // Get the header out of the object, use LoadMarkNode when available
4593   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4594   // The control of the load must be NULL. Otherwise, the load can move before
4595   // the null check after castPP removal.
4596   Node* no_ctrl = NULL;
4597   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4598 
4599   // Test the header to see if it is unlocked.
4600   // This also serves as guard against inline types
4601   Node *lock_mask      = _gvn.MakeConX(markWord::inline_type_mask_in_place);
4602   Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4603   Node *unlocked_val   = _gvn.MakeConX(markWord::unlocked_value);
4604   Node *chk_unlocked   = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
4605   Node *test_unlocked  = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
4606 
4607   generate_slow_guard(test_unlocked, slow_region);
4608 
4609   // Get the hash value and check to see that it has been properly assigned.
4610   // We depend on hash_mask being at most 32 bits and avoid the use of
4611   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4612   // vm: see markWord.hpp.
4613   Node *hash_mask      = _gvn.intcon(markWord::hash_mask);
4614   Node *hash_shift     = _gvn.intcon(markWord::hash_shift);
4615   Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
4616   // This hack lets the hash bits live anywhere in the mark object now, as long
4617   // as the shift drops the relevant bits into the low 32 bits.  Note that
4618   // Java spec says that HashCode is an int so there's no point in capturing
4619   // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
4620   hshifted_header      = ConvX2I(hshifted_header);
4621   Node *hash_val       = _gvn.transform(new AndINode(hshifted_header, hash_mask));
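  // In effect (sketch, assuming the usual mark word layout from markWord.hpp):
  //   hash_val = (int)(header >>> hash_shift) & hash_mask   // unsigned shift, as URShiftX above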

4647     // this->control() comes from set_results_for_java_call
4648     result_reg->init_req(_slow_path, control());
4649     result_val->init_req(_slow_path, slow_result);
4650     result_io  ->set_req(_slow_path, i_o());
4651     result_mem ->set_req(_slow_path, reset_memory());
4652   }
4653 
4654   // Return the combined state.
4655   set_i_o(        _gvn.transform(result_io)  );
4656   set_all_memory( _gvn.transform(result_mem));
4657 
4658   set_result(result_reg, result_val);
4659   return true;
4660 }
4661 
4662 //---------------------------inline_native_getClass----------------------------
4663 // public final native Class<?> java.lang.Object.getClass();
4664 //
4665 // Build special case code for calls to getClass on an object.
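// For illustration (sketch): a receiver statically known to be an inline type gets a constant
// mirror (at most a null check remains); any other receiver loads the mirror from its klass.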
4666 bool LibraryCallKit::inline_native_getClass() {
4667   Node* obj = argument(0);
4668   if (obj->is_InlineTypeBase()) {
4669     const Type* t = _gvn.type(obj);
4670     if (t->maybe_null()) {
4671       null_check(obj);
4672     }
4673     set_result(makecon(TypeInstPtr::make(t->inline_klass()->java_mirror())));
4674     return true;
4675   }
4676   obj = null_check_receiver();
4677   if (stopped())  return true;
4678   set_result(load_mirror_from_klass(load_object_klass(obj)));
4679   return true;
4680 }
4681 
4682 //-----------------inline_native_Reflection_getCallerClass---------------------
4683 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4684 //
4685 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4686 //
4687 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4688 // in that it must skip particular security frames and checks for
4689 // caller sensitive methods.
4690 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4691 #ifndef PRODUCT
4692   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4693     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4694   }
4695 #endif
4696 

5013 //  not cloneable or finalizer => slow path to out-of-line Object.clone
5014 //
5015 // The general case has two steps, allocation and copying.
5016 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5017 //
5018 // Copying also has two cases, oop arrays and everything else.
5019 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5020 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5021 //
5022 // These steps fold up nicely if and when the cloned object's klass
5023 // can be sharply typed as an object array, a type array, or an instance.
5024 //
5025 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5026   PhiNode* result_val;
5027 
5028   // Set the reexecute bit for the interpreter to reexecute
5029   // the bytecode that invokes Object.clone if deoptimization happens.
5030   { PreserveReexecuteState preexecs(this);
5031     jvms()->set_should_reexecute(true);
5032 
5033     Node* obj = argument(0);
5034     if (obj->is_InlineType()) {
5035       return false;
5036     }
5037 
5038     obj = null_check_receiver();
5039     if (stopped())  return true;
5040 
5041     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
5042 
5043     // If we are going to clone an instance, we need its exact type to
5044     // know the number and types of fields to convert the clone to
5045     // loads/stores. Maybe a speculative type can help us.
5046     if (!obj_type->klass_is_exact() &&
5047         obj_type->speculative_type() != NULL &&
5048         obj_type->speculative_type()->is_instance_klass() &&
5049         !obj_type->speculative_type()->is_inlinetype()) {
5050       ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5051       if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5052           !spec_ik->has_injected_fields()) {
5053         if (!obj_type->isa_instptr() ||
5054             obj_type->is_instptr()->instance_klass()->has_subklass()) {
5055           obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5056         }
5057       }
5058     }
5059 
5060     // Conservatively insert a memory barrier on all memory slices.
5061     // Do not let writes into the original float below the clone.
5062     insert_mem_bar(Op_MemBarCPUOrder);
5063 
5064     // paths into result_reg:
5065     enum {
5066       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
5067       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
5068       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
5069       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
5070       PATH_LIMIT
5071     };
5072     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5073     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5074     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
5075     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5076     record_for_igvn(result_reg);
5077 
5078     Node* obj_klass = load_object_klass(obj);
5079     // We only go to the fast case code if we pass a number of guards.
5080     // The paths which do not pass are accumulated in the slow_region.
5081     RegionNode* slow_region = new RegionNode(1);
5082     record_for_igvn(slow_region);
5083 
5084     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
5085     if (array_ctl != NULL) {
5086       // It's an array.
5087       PreserveJVMState pjvms(this);
5088       set_control(array_ctl);



5089 
5090       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5091       const TypeAryPtr* ary_ptr = obj_type->isa_aryptr();
5092       if (UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Expansion) &&
5093           obj_type->can_be_inline_array() &&
5094           (ary_ptr == NULL || (!ary_ptr->is_not_flat() && (!ary_ptr->is_flat() || ary_ptr->elem()->inline_klass()->contains_oops())))) {
5095         // A flattened inline type array may have an object field that would require a
5096         // write barrier. Conservatively, go to the slow path.
5097         generate_fair_guard(flat_array_test(obj_klass), slow_region);













5098       }







5099 
5100       if (!stopped()) {
5101         Node* obj_length = load_array_length(obj);
5102         Node* obj_size  = NULL;
5103         Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size, /*deoptimize_on_exception=*/true);
5104 
5105         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5106         if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5107           // If it is an oop array, it requires very special treatment,
5108           // because gc barriers are required when accessing the array.
5109           Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
5110           if (is_obja != NULL) {
5111             PreserveJVMState pjvms2(this);
5112             set_control(is_obja);
5113             // Generate a direct call to the right arraycopy function(s).
5114             // Clones are always tightly coupled.
5115             ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5116             ac->set_clone_oop_array();
5117             Node* n = _gvn.transform(ac);
5118             assert(n == ac, "cannot disappear");
5119             ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5120 
5121             result_reg->init_req(_objArray_path, control());
5122             result_val->init_req(_objArray_path, alloc_obj);
5123             result_i_o ->set_req(_objArray_path, i_o());
5124             result_mem ->set_req(_objArray_path, reset_memory());
5125           }
5126         }
5127         // Otherwise, there are no barriers to worry about.
5128         // (We can dispense with card marks if we know the allocation
5129         //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
5130         //  causes the non-eden paths to take compensating steps to
5131         //  simulate a fresh allocation, so that no further
5132         //  card marks are required in compiled code to initialize
5133         //  the object.)
5134 
5135         if (!stopped()) {
5136           copy_to_clone(obj, alloc_obj, obj_size, true);
5137 
5138           // Present the results of the copy.
5139           result_reg->init_req(_array_path, control());
5140           result_val->init_req(_array_path, alloc_obj);
5141           result_i_o ->set_req(_array_path, i_o());
5142           result_mem ->set_req(_array_path, reset_memory());
5143         }
5144       }
5145     }
5146 




5147     if (!stopped()) {
5148       // It's an instance (we did array above).  Make the slow-path tests.
5149       // If this is a virtual call, we generate a funny guard.  We grab
5150       // the vtable entry corresponding to clone() from the target object.
5151       // If the target method which we are calling happens to be the
5152       // Object clone() method, we pass the guard.  We do not need this
5153       // guard for non-virtual calls; the caller is known to be the native
5154       // Object clone().
5155       if (is_virtual) {
5156         generate_virtual_guard(obj_klass, slow_region);
5157       }
5158 
5159       // The object must be easily cloneable and must not have a finalizer.
5160       // Both of these conditions may be checked in a single test.
5161       // We could optimize the test further, but we don't care.
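      // Sketch of the combined check: with mask = JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
      // (access_flags & mask) == JVM_ACC_IS_CLONEABLE_FAST holds exactly when the cloneable
      // bit is set and the finalizer bit is clear, so one compare covers both conditions.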
5162       generate_access_flags_guard(obj_klass,
5163                                   // Test both conditions:
5164                                   JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
5165                                   // Must be cloneable but not finalizer:
5166                                   JVM_ACC_IS_CLONEABLE_FAST,

5287 // array in the heap that GCs wouldn't expect. Move the allocation
5288 // after the traps so we don't allocate the array if we
5289 // deoptimize. This is possible because tightly_coupled_allocation()
5290 // guarantees there's no observer of the allocated array at this point
5291 // and the control flow is simple enough.
5292 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms,
5293                                                     int saved_reexecute_sp, uint new_idx) {
5294   if (saved_jvms != NULL && !stopped()) {
5295     assert(alloc != NULL, "only with a tightly coupled allocation");
5296     // restore JVM state to the state at the arraycopy
5297     saved_jvms->map()->set_control(map()->control());
5298     assert(saved_jvms->map()->memory() == map()->memory(), "memory state changed?");
5299     assert(saved_jvms->map()->i_o() == map()->i_o(), "IO state changed?");
5300     // If we've improved the types of some nodes (null check) while
5301     // emitting the guards, propagate them to the current state
5302     map()->replaced_nodes().apply(saved_jvms->map(), new_idx);
5303     set_jvms(saved_jvms);
5304     _reexecute_sp = saved_reexecute_sp;
5305 
5306     // Remove the allocation from above the guards
5307     CallProjections* callprojs = alloc->extract_projections(true);

5308     InitializeNode* init = alloc->initialization();
5309     Node* alloc_mem = alloc->in(TypeFunc::Memory);
5310     C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5311     C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
5312 
5313     // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5314     // the allocation (i.e. is only valid if the allocation succeeds):
5315     // 1) replace CastIINode with AllocateArrayNode's length here
5316     // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
5317     //
5318     // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
5319     // a new, separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy).
5320     Node* init_control = init->proj_out(TypeFunc::Control);
5321     Node* alloc_length = alloc->Ideal_length();
5322 #ifdef ASSERT
5323     Node* prev_cast = NULL;
5324 #endif
5325     for (uint i = 0; i < init_control->outcnt(); i++) {
5326       Node* init_out = init_control->raw_out(i);
5327       if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5328 #ifdef ASSERT
5329         if (prev_cast == NULL) {
5330           prev_cast = init_out;
5331         } else {
5332           if (prev_cast->cmp(*init_out) == false) {
5333             prev_cast->dump();
5334             init_out->dump();
5335             assert(false, "not equal CastIINode");
5336           }
5337         }
5338 #endif
5339         C->gvn_replace_by(init_out, alloc_length);
5340       }
5341     }
5342     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5343 
5344     // move the allocation here (after the guards)
5345     _gvn.hash_delete(alloc);
5346     alloc->set_req(TypeFunc::Control, control());
5347     alloc->set_req(TypeFunc::I_O, i_o());
5348     Node *mem = reset_memory();
5349     set_all_memory(mem);
5350     alloc->set_req(TypeFunc::Memory, mem);
5351     set_control(init->proj_out_or_null(TypeFunc::Control));
5352     set_i_o(callprojs->fallthrough_ioproj);
5353 
5354     // Update memory as done in GraphKit::set_output_for_allocation()
5355     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5356     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5357     if (ary_type->isa_aryptr() && length_type != NULL) {
5358       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5359     }
5360     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5361     int            elemidx  = C->get_alias_index(telemref);
5362     set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
5363     set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
5364 
5365     Node* allocx = _gvn.transform(alloc);
5366     assert(allocx == alloc, "where has the allocation gone?");
5367     assert(dest->is_CheckCastPP(), "not an allocation result?");
5368 
5369     _gvn.hash_delete(dest);
5370     dest->set_req(0, control());
5371     Node* destx = _gvn.transform(dest);
5372     assert(destx == dest, "where has the allocation result gone?");

5508       // Do we have the exact type of dest?
5509       bool could_have_dest = dest_spec;
5510       ciKlass* src_k = NULL;
5511       ciKlass* dest_k = NULL;
5512       if (!src_spec) {
5513         src_k = src_type->speculative_type_not_null();
5514         if (src_k != NULL && src_k->is_array_klass()) {
5515           could_have_src = true;
5516         }
5517       }
5518       if (!dest_spec) {
5519         dest_k = dest_type->speculative_type_not_null();
5520         if (dest_k != NULL && dest_k->is_array_klass()) {
5521           could_have_dest = true;
5522         }
5523       }
5524       if (could_have_src && could_have_dest) {
5525         // If we can have both exact types, emit the missing guards
5526         if (could_have_src && !src_spec) {
5527           src = maybe_cast_profiled_obj(src, src_k, true);
5528           src_type = _gvn.type(src);
5529           top_src = src_type->isa_aryptr();
5530         }
5531         if (could_have_dest && !dest_spec) {
5532           dest = maybe_cast_profiled_obj(dest, dest_k, true);
5533           dest_type = _gvn.type(dest);
5534           top_dest = dest_type->isa_aryptr();
5535         }
5536       }
5537     }
5538   }
5539 
5540   ciMethod* trap_method = method();
5541   int trap_bci = bci();
5542   if (saved_jvms != NULL) {
5543     trap_method = alloc->jvms()->method();
5544     trap_bci = alloc->jvms()->bci();
5545   }
5546 
5547   bool negative_length_guard_generated = false;
5548 
5549   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5550       can_emit_guards && !src->is_top() && !dest->is_top()) {

5551     // validate arguments: enables transformation of the ArrayCopyNode
5552     validated = true;
5553 
5554     RegionNode* slow_region = new RegionNode(1);
5555     record_for_igvn(slow_region);
5556 
5557     // (1) src and dest are arrays.
5558     generate_non_array_guard(load_object_klass(src), slow_region);
5559     generate_non_array_guard(load_object_klass(dest), slow_region);
5560 
5561     // (2) src and dest arrays must have elements of the same BasicType
5562     // done at macro expansion or at Ideal transformation time
5563 
5564     // (4) src_offset must not be negative.
5565     generate_negative_guard(src_offset, slow_region);
5566 
5567     // (5) dest_offset must not be negative.
5568     generate_negative_guard(dest_offset, slow_region);
5569 
5570     // (7) src_offset + length must not exceed length of src.
5571     generate_limit_guard(src_offset, length,
5572                          load_array_length(src),
5573                          slow_region);
5574 
5575     // (8) dest_offset + length must not exceed length of dest.
5576     generate_limit_guard(dest_offset, length,
5577                          load_array_length(dest),
5578                          slow_region);
5579 
5580     // (6) length must not be negative.
5581     // This is also checked in generate_arraycopy() during macro expansion, but
5582     // we also have to check it here for the case where the ArrayCopyNode will
5583     // be eliminated by Escape Analysis.
5584     if (EliminateAllocations) {
5585       generate_negative_guard(length, slow_region);
5586       negative_length_guard_generated = true;
5587     }
5588 
5589     // (9) each element of an oop array must be assignable
5590     Node* dest_klass = load_object_klass(dest);
5591     if (src != dest) {
5592       Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
5593       slow_region->add_req(not_subtype_ctrl);
5594     }
5595 
5596     const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
5597     const Type* toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
5598     if (toop->isa_aryptr() != NULL) {
5599       toop = toop->is_aryptr()->cast_to_not_flat(false)->cast_to_not_null_free(false);
5600     }
5601     src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
5602     src_type = _gvn.type(src);
5603     top_src  = src_type->isa_aryptr();
5604 
5605     // Handle flat inline type arrays (null-free arrays are handled by the subtype check above)
5606     if (!stopped() && UseFlatArray) {
5607       // If dest is flat, src must be flat as well (guaranteed by src <: dest check). Handle flat src here.
5608       assert(top_dest == NULL || !top_dest->is_flat() || top_src->is_flat(), "src array must be flat");
5609       if (top_src != NULL && top_src->is_flat()) {
5610         // Src is flat, check that dest is flat as well
5611         if (top_dest != NULL && !top_dest->is_flat()) {
5612           generate_fair_guard(flat_array_test(dest_klass, /* flat = */ false), slow_region);
5613           // Since dest is flat and src <: dest, dest must have the same type as src.
5614           top_dest = top_src->cast_to_exactness(false);
5615           assert(top_dest->is_flat(), "dest must be flat");
5616           dest = _gvn.transform(new CheckCastPPNode(control(), dest, top_dest));
5617         }
5618       } else if (top_src == NULL || !top_src->is_not_flat()) {
5619         // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
5620         // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
5621         assert(top_dest == NULL || !top_dest->is_flat(), "dest array must not be flat");
5622         generate_fair_guard(flat_array_test(src), slow_region);
5623         if (top_src != NULL) {
5624           top_src = top_src->cast_to_not_flat();
5625           src = _gvn.transform(new CheckCastPPNode(control(), src, top_src));
5626         }
5627       }
5628     }
5629 
5630     {
5631       PreserveJVMState pjvms(this);
5632       set_control(_gvn.transform(slow_region));
5633       uncommon_trap(Deoptimization::Reason_intrinsic,
5634                     Deoptimization::Action_make_not_entrant);
5635       assert(stopped(), "Should be stopped");
5636     }




5637   }
5638 
5639   arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp, new_idx);
5640 
5641   if (stopped()) {
5642     return true;
5643   }
5644 
5645   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != NULL, negative_length_guard_generated,
5646                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
5647                                           // so the compiler has a chance to eliminate them: during macro expansion,
5648                                           // we have to set their control (CastPP nodes are eliminated).
5649                                           load_object_klass(src), load_object_klass(dest),
5650                                           load_array_length(src), load_array_length(dest));
5651 
5652   ac->set_arraycopy(validated);
5653 
5654   Node* n = _gvn.transform(ac);
5655   if (n == ac) {
5656     ac->connect_outputs(this);