 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "ci/ciUtilities.inline.hpp"
#include "classfile/vmIntrinsics.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "jfr/support/jfrIntrinsics.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/c2compiler.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/countbitsnode.hpp"
#include "opto/idealKit.hpp"
#include "opto/library_call.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/mulnode.hpp"
  case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
  case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
  case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar(StrIntrinsicNode::U);
  case vmIntrinsics::_indexOfL_char:            return inline_string_indexOfChar(StrIntrinsicNode::L);

  case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);
  case vmIntrinsics::_equalsU:                  return inline_string_equals(StrIntrinsicNode::UU);

  case vmIntrinsics::_vectorizedHashCode:       return inline_vectorizedHashCode();

  case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
  case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
  case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
  case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);

  case vmIntrinsics::_compressStringC:
  case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
  case vmIntrinsics::_inflateStringC:
  case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);

  case vmIntrinsics::_getReference:             return inline_unsafe_access(!is_store, T_OBJECT,  Relaxed, false);
  case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_store, T_BOOLEAN, Relaxed, false);
  case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_store, T_BYTE,    Relaxed, false);
  case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_store, T_SHORT,   Relaxed, false);
  case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_store, T_CHAR,    Relaxed, false);
  case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_store, T_INT,     Relaxed, false);
  case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_store, T_LONG,    Relaxed, false);
  case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_store, T_FLOAT,   Relaxed, false);
  case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_store, T_DOUBLE,  Relaxed, false);

  case vmIntrinsics::_putReference:             return inline_unsafe_access( is_store, T_OBJECT,  Relaxed, false);
  case vmIntrinsics::_putBoolean:               return inline_unsafe_access( is_store, T_BOOLEAN, Relaxed, false);
  case vmIntrinsics::_putByte:                  return inline_unsafe_access( is_store, T_BYTE,    Relaxed, false);
  case vmIntrinsics::_putShort:                 return inline_unsafe_access( is_store, T_SHORT,   Relaxed, false);
  case vmIntrinsics::_putChar:                  return inline_unsafe_access( is_store, T_CHAR,    Relaxed, false);
  case vmIntrinsics::_putInt:                   return inline_unsafe_access( is_store, T_INT,     Relaxed, false);
  case vmIntrinsics::_putLong:                  return inline_unsafe_access( is_store, T_LONG,    Relaxed, false);
  case vmIntrinsics::_putFloat:                 return inline_unsafe_access( is_store, T_FLOAT,   Relaxed, false);
  case vmIntrinsics::_putDouble:                return inline_unsafe_access( is_store, T_DOUBLE,  Relaxed, false);

  case vmIntrinsics::_getReferenceVolatile:     return inline_unsafe_access(!is_store, T_OBJECT,  Volatile, false);
  case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_store, T_BOOLEAN, Volatile, false);
  case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_store, T_BYTE,    Volatile, false);
  case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_store, T_SHORT,   Volatile, false);
  case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_store, T_CHAR,    Volatile, false);
  case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_store, T_INT,     Volatile, false);
  case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_store, T_LONG,    Volatile, false);
  case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_store, T_FLOAT,   Volatile, false);
  case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_store, T_DOUBLE,  Volatile, false);

  case vmIntrinsics::_putReferenceVolatile:     return inline_unsafe_access( is_store, T_OBJECT,  Volatile, false);
  case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access( is_store, T_BOOLEAN, Volatile, false);
  case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access( is_store, T_BYTE,    Volatile, false);
  case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access( is_store, T_SHORT,   Volatile, false);
  case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access( is_store, T_CHAR,    Volatile, false);
  case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access( is_store, T_INT,     Volatile, false);
  case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access( is_store, T_LONG,    Volatile, false);
  case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access( is_store, T_FLOAT,   Volatile, false);
  case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access( is_store, T_DOUBLE,  Volatile, false);
                                                                                                  "notifyJvmtiEnd", false, true);
  case vmIntrinsics::_notifyJvmtiVThreadMount:   return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_mount()),
                                                                                         "notifyJvmtiMount", false, false);
  case vmIntrinsics::_notifyJvmtiVThreadUnmount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_unmount()),
                                                                                         "notifyJvmtiUnmount", false, false);
  case vmIntrinsics::_notifyJvmtiVThreadHideFrames: return inline_native_notify_jvmti_hide();
#endif

#ifdef JFR_HAVE_INTRINSICS
  case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JfrTime::time_function()), "counterTime");
  case vmIntrinsics::_getEventWriter:           return inline_native_getEventWriter();
  case vmIntrinsics::_jvm_commit:               return inline_native_jvm_commit();
#endif
  case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
  case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
  case vmIntrinsics::_writeback0:               return inline_unsafe_writeback0();
  case vmIntrinsics::_writebackPreSync0:        return inline_unsafe_writebackSync0(true);
  case vmIntrinsics::_writebackPostSync0:       return inline_unsafe_writebackSync0(false);
  case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
  case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
  case vmIntrinsics::_getLength:                return inline_native_getLength();
  case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
  case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
  case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
  case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
  case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
  case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
  case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());

  case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
  case vmIntrinsics::_newArray:                 return inline_unsafe_newArray(false);

  case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();

  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_getModifiers:
  case vmIntrinsics::_isInterface:
  case vmIntrinsics::_isArray:
  case vmIntrinsics::_isPrimitive:
  case vmIntrinsics::_isHidden:
  case vmIntrinsics::_getSuperclass:
  case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());

  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_floatToIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_doubleToLongBits:
  case vmIntrinsics::_longBitsToDouble:
  case vmIntrinsics::_floatToFloat16:
  case vmIntrinsics::_float16ToFloat:           return inline_fp_conversions(intrinsic_id());
  case vmIntrinsics::_remainderUnsigned_l: {
    zero_check_long(argument(2));
    // Compile-time detection of a division by zero (ArithmeticException)
    if (stopped()) {
      return true; // keep the graph constructed so far
    }
    n = new UModLNode(control(), argument(0), argument(2));
    break;
  }
  default:  fatal_unexpected_iid(id);  break;
  }
  set_result(_gvn.transform(n));
  return true;
}

//----------------------------inline_unsafe_access----------------------------

const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr* adr_type) {
  // Attempt to infer a sharper value type from the offset and base type.
  ciKlass* sharpened_klass = nullptr;

  // See if it is an instance field, with an object type.
  if (alias_type->field() != nullptr) {
    if (alias_type->field()->type()->is_klass()) {
      sharpened_klass = alias_type->field()->type()->as_klass();
    }
  }

  const TypeOopPtr* result = nullptr;
  // See if it is a narrow oop array.
  if (adr_type->isa_aryptr()) {
    if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
      const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
      if (elem_type != nullptr && elem_type->is_loaded()) {
        // Sharpen the value type.
        result = elem_type;
      }
    }
  }

  // The sharpened class might be unloaded if there is no class loader
  // constraint in place.
  if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
    // Sharpen the value type.
    result = TypeOopPtr::make_from_klass(sharpened_klass);
  }
  if (result != nullptr) {
#ifndef PRODUCT
    if (C->print_intrinsics() || C->print_inlining()) {
      tty->print("  from base type:  ");  adr_type->dump(); tty->cr();
      tty->print("  sharpened value: ");  result->dump();   tty->cr();
    }
#endif
  }
  return result;
}
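
// Example (a sketch): for a load like Unsafe.getReference(a, off) where the
// address classifies as a String[] element, the code above sharpens the
// returned value type from Object to String, which can enable later
// type-based optimizations on the loaded value.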

DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
  switch (kind) {
    case Relaxed:
      return MO_UNORDERED;
    case Opaque:
      return MO_RELAXED;
    case Acquire:
      return MO_ACQUIRE;
    case Release:
      return MO_RELEASE;
    case Volatile:
      return MO_SEQ_CST;
    default:
      ShouldNotReachHere();
      return 0;
  }
}
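
// Example mapping (a sketch): plain Unsafe.getInt(o, off) reaches
// inline_unsafe_access() as Relaxed and is tagged MO_UNORDERED, while
// Unsafe.getIntVolatile(o, off) arrives as Volatile and is tagged MO_SEQ_CST,
// matching the _getInt and _getIntVolatile dispatch entries above.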

bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
  if (callee()->is_static())  return false;  // caller must have the capability!
  DecoratorSet decorators = C2_UNSAFE_ACCESS;
  guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
  guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
  assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");

  if (is_reference_type(type)) {
    decorators |= ON_UNKNOWN_OOP_REF;
  }

  if (unaligned) {
    decorators |= C2_UNALIGNED;
  }

#ifndef PRODUCT
  {
    ResourceMark rm;
    // Check the signatures.
    ciSignature* sig = callee()->signature();
#ifdef ASSERT
    if (!is_store) {
      // Object getReference(Object base, int/long offset), etc.
      BasicType rtype = sig->return_type()->basic_type();
      assert(rtype == type, "getter must return the expected value");
      assert(sig->count() == 2, "oop getter has 2 arguments");
      assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
      assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
    } else {
      // void putReference(Object base, int/long offset, Object x), etc.
      assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
      assert(sig->count() == 3, "oop putter has 3 arguments");
      assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
      assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
      BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
      assert(vtype == type, "putter must accept the expected value");
    }
#endif // ASSERT
  }
#endif //PRODUCT

  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".

  Node* receiver = argument(0);  // type: oop

  // Build address expression.
  Node* heap_base_oop = top();

  // The base is either a Java object or a value produced by Unsafe.staticFieldBase
  Node* base = argument(1);  // type: oop
  // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
  Node* offset = argument(2);  // type: long
  // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
  // to be plain byte offsets, which are also the same as those accepted
  // by oopDesc::field_addr.
  assert(Unsafe_field_offset_to_byte_offset(11) == 11,
         "fieldOffset must be byte-scaled");
  // 32-bit machines ignore the high half!
  offset = ConvL2X(offset);

  // Save state and restore on bailout
  uint old_sp = sp();
  SafePointNode* old_map = clone_map();

  Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);

  if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
    if (type != T_OBJECT) {
      decorators |= IN_NATIVE; // off-heap primitive access
    } else {
      set_map(old_map);
      set_sp(old_sp);
      return false; // off-heap oop accesses are not supported
    }
  } else {
    heap_base_oop = base; // on-heap or mixed access
  }

  // Can base be null? Otherwise, always on-heap access.
  bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));

  if (!can_access_non_heap) {
    decorators |= IN_HEAP;
  }

  Node* val = is_store ? argument(4) : nullptr;

  const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
  if (adr_type == TypePtr::NULL_PTR) {
    set_map(old_map);
    set_sp(old_sp);
    return false; // off-heap access with zero address
  }

  // Try to categorize the address.
  Compile::AliasType* alias_type = C->alias_type(adr_type);
  assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");

  if (alias_type->adr_type() == TypeInstPtr::KLASS ||
      alias_type->adr_type() == TypeAryPtr::RANGE) {
    set_map(old_map);
    set_sp(old_sp);
    return false; // not supported
  }

  bool mismatched = false;
  BasicType bt = alias_type->basic_type();
  if (bt != T_ILLEGAL) {
    assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
    if (bt == T_BYTE && adr_type->isa_aryptr()) {
      // The alias type doesn't differentiate between byte[] and boolean[].
      // Use the address type to get the element type.
      bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
    }
    if (is_reference_type(bt, true)) {
      // accessing an array field with getReference is not a mismatch
      bt = T_OBJECT;
    }
    if ((bt == T_OBJECT) != (type == T_OBJECT)) {
      // Don't intrinsify mismatched object accesses
      set_map(old_map);
      set_sp(old_sp);
      return false;
    }
    mismatched = (bt != type);
  } else if (alias_type->adr_type()->isa_oopptr()) {
    mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
  }

  destruct_map_clone(old_map);
  assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");

  if (mismatched) {
    decorators |= C2_MISMATCHED;
  }

  // First guess at the value type.
  const Type* value_type = Type::get_const_basic_type(type);

  // Figure out the memory ordering.
  decorators |= mo_decorator_for_access_kind(kind);

  if (!is_store && type == T_OBJECT) {
    const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
    if (tjp != nullptr) {
      value_type = tjp;
    }
  }

  receiver = null_check(receiver);
  if (stopped()) {
    return true;
  }
  // Heap pointers get a null-check from the interpreter,
  // as a courtesy. However, this is not guaranteed by Unsafe,
  // and it is not possible to fully distinguish unintended nulls
  // from intended ones in this API.

  if (!is_store) {
    Node* p = nullptr;
    // Try to constant fold a load from a constant field
    ciField* field = alias_type->field();
    if (heap_base_oop != top() && field != nullptr && field->is_constant() && !mismatched) {
      // final or stable field
      p = make_constant_from_field(field, heap_base_oop);
    }

    if (p == nullptr) { // Could not constant fold the load
      p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
      // Normalize the value returned by getBoolean in the following cases
      if (type == T_BOOLEAN &&
          (mismatched ||
           heap_base_oop == top() ||                  // - heap_base_oop is null or
           (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
                                                      //   and the unsafe access is made to a large offset
                                                      //   (i.e., larger than the maximum offset necessary for any
                                                      //   field access)
          ) {
        IdealKit ideal = IdealKit(this);
#define __ ideal.
        IdealVariable normalized_result(ideal);
        __ declarations_done();
        __ set(normalized_result, p);
        __ if_then(p, BoolTest::ne, ideal.ConI(0));
          __ set(normalized_result, ideal.ConI(1));
        ideal.end_if();
        final_sync(ideal);
        p = __ value(normalized_result);
#undef __
      }
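      // In effect (a sketch): p = (p == 0) ? 0 : 1, normalizing a boolean
      // load that may observe a non-canonical byte value to exactly 0 or 1.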
    }
    if (type == T_ADDRESS) {
      p = gvn().transform(new CastP2XNode(nullptr, p));
      p = ConvX2UL(p);
    }
    // The load node has the control of the preceding MemBarCPUOrder. All
    // following nodes will have the control of the MemBarCPUOrder inserted at
    // the end of this method. So, pushing the load onto the stack at a later
    // point is fine.
    set_result(p);
  } else {
    if (bt == T_ADDRESS) {
      // Repackage the long as a pointer.
      val = ConvL2X(val);
      val = gvn().transform(new CastX2PNode(val));
    }
    access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
  }

  return true;
}
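
// Usage sketch (hedged; names illustrative): for Java code like
//   int v = UNSAFE.getInt(obj, UNSAFE.objectFieldOffset(f));
// the method above null-checks obj, builds the address obj+offset, classifies
// its alias type, and emits access_load_at() with C2_UNSAFE_ACCESS plus the
// memory-ordering decorator chosen by mo_decorator_for_access_kind().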

//----------------------------inline_unsafe_load_store----------------------------
// This method serves a couple of different customers (depending on LoadStoreKind):
//
// LS_cmp_swap:
//
//   boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
//   boolean compareAndSetInt(      Object o, long offset, int    expected, int    x);
//   boolean compareAndSetLong(     Object o, long offset, long   expected, long   x);
//
// LS_cmp_swap_weak:
//
//   boolean weakCompareAndSetReference(       Object o, long offset, Object expected, Object x);
//   boolean weakCompareAndSetReferencePlain(  Object o, long offset, Object expected, Object x);
//   boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
//   boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
//
//   boolean weakCompareAndSetInt(             Object o, long offset, int    expected, int    x);
  }
  case LS_cmp_swap:
  case LS_cmp_swap_weak:
  case LS_get_add:
    break;
  default:
    ShouldNotReachHere();
  }

  // Null check receiver.
  receiver = null_check(receiver);
  if (stopped()) {
    return true;
  }

  int alias_idx = C->get_alias_index(adr_type);

  if (is_reference_type(type)) {
    decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;

    // The transformation of a value which could be a null pointer (CastPP #null)
    // could be delayed during Parse (for example, in adjust_map_after_if()).
    // Execute the transformation here to avoid barrier generation in such a case.
    if (_gvn.type(newval) == TypePtr::NULL_PTR)
      newval = _gvn.makecon(TypePtr::NULL_PTR);

    if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
      // Refine the value to a null constant, when it is known to be null
      oldval = _gvn.makecon(TypePtr::NULL_PTR);
    }
  }

  Node* result = nullptr;
  switch (kind) {
    case LS_cmp_exchange: {
      result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
                                            oldval, newval, value_type, type, decorators);
      break;
    }
    case LS_cmp_swap_weak:
                  Deoptimization::Action_make_not_entrant);
  }
  if (stopped()) {
    return true;
  }
#endif //INCLUDE_JVMTI

  Node* test = nullptr;
  if (LibraryCallKit::klass_needs_init_guard(kls)) {
    // Note:  The argument might still be an illegal value like
    // Serializable.class or Object[].class.  The runtime will handle it.
    // But we must make an explicit check for initialization.
    Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
    // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
    // can generate code to load it as unsigned byte.
    Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
    Node* bits = intcon(InstanceKlass::fully_initialized);
    test = _gvn.transform(new SubINode(inst, bits));
    // The 'test' is non-zero if we need to take a slow path.
  }

  Node* obj = new_instance(kls, test);
  set_result(obj);
  return true;
}

//------------------------inline_native_time_funcs--------------
// inline code for System.currentTimeMillis() and System.nanoTime()
// these have the same type and signature
bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
  const TypeFunc* tf = OptoRuntime::void_long_Type();
  const TypePtr* no_memory_effects = nullptr;
  Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
  Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
#ifdef ASSERT
  Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
  assert(value_top == top(), "second value must be top");
#endif
  set_result(value);
  return true;
}
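
// Usage sketch (hedged): System.nanoTime() reaches this helper through the
// _nanoTime dispatch entry with funcAddr == os::javaTimeNanos, producing a
// leaf (RC_LEAF) runtime call with no memory effects whose long result
// becomes the intrinsic's return value.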


//------------------------inline_native_setCurrentThread------------------
bool LibraryCallKit::inline_native_setCurrentThread() {
  assert(C->method()->changes_current_thread(),
         "method changes current Thread but is not annotated ChangesCurrentThread");
  Node* arr = argument(1);
  Node* thread = _gvn.transform(new ThreadLocalNode());
  Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
  Node* thread_obj_handle
    = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
  thread_obj_handle = _gvn.transform(thread_obj_handle);
  const TypePtr* adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
  access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
  JFR_ONLY(extend_setCurrentThread(thread, arr);)
  return true;
}

const Type* LibraryCallKit::scopedValueCache_type() {
  ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
  const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
  const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);

  // Because we create the scopedValue cache lazily we have to make the
  // type of the result BotPTR.
  bool xk = etype->klass_is_exact();
  const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
  return objects_type;
}

Node* LibraryCallKit::scopedValueCache_helper() {
  Node* thread = _gvn.transform(new ThreadLocalNode());
  Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
  // We cannot use immutable_memory() because we might flip onto a
  // different carrier thread, at which point we'll need to use that
  // carrier thread's cache.
  // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
  //       TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
  return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
}

//------------------------inline_native_scopedValueCache------------------
bool LibraryCallKit::inline_native_scopedValueCache() {
  Node* cache_obj_handle = scopedValueCache_helper();
  const Type* objects_type = scopedValueCache_type();
  set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));

  return true;
}

//------------------------inline_native_setScopedValueCache------------------
bool LibraryCallKit::inline_native_setScopedValueCache() {
  Node* arr = argument(0);
  Node* cache_obj_handle = scopedValueCache_helper();
  const Type* objects_type = scopedValueCache_type();

  const TypePtr* adr_type = _gvn.type(cache_obj_handle)->isa_ptr();
  access_store_at(nullptr, cache_obj_handle, adr_type, arr, objects_type, T_OBJECT, IN_NATIVE | MO_UNORDERED);

  return true;
}

//---------------------------load_mirror_from_klass----------------------------
// Given a klass oop, load its java mirror (a java.lang.Class oop).
Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
  Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
  Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
  // mirror = ((OopHandle)mirror)->resolve();
  return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
}

//-----------------------load_klass_from_mirror_common-------------------------
// Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
// Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
// and branch to the given path on the region.
// If never_see_null, take an uncommon trap on null, so we can optimistically
// compile for the non-null case.
// If the region is null, force never_see_null = true.
Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
                                                    bool never_see_null,
                                                    RegionNode* region,
                                                    int null_path,
                                                    int offset) {
  if (region == nullptr)  never_see_null = true;
  Node* p = basic_plus_adr(mirror, offset);
  const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
  Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
  Node* null_ctl = top();
  kls = null_check_oop(kls, &null_ctl, never_see_null);
  if (region != nullptr) {
    // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
    region->init_req(null_path, null_ctl);
  } else {
    assert(null_ctl == top(), "no loose ends");
  }
  return kls;
}

//--------------------(inline_native_Class_query helpers)---------------------
// Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
// Fall through if (mods & mask) == bits, take the guard otherwise.
Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
  // Branch around if the given klass has the given modifier bit set.
  // Like generate_guard, adds a new path onto the region.
  Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
  Node* mods = make_load(nullptr, modp, TypeInt::INT, T_INT, MemNode::unordered);
  Node* mask = intcon(modifier_mask);
  Node* bits = intcon(modifier_bits);
  Node* mbit = _gvn.transform(new AndINode(mods, mask));
  Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
  Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
  return generate_fair_guard(bol, region);
}
Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
  return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
}
Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
  return generate_access_flags_guard(kls, JVM_ACC_IS_HIDDEN_CLASS, 0, region);
}
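
// Usage sketch (hedged): a query like Class.isInterface() can be lowered as
//   Node* guard = generate_interface_guard(kls, region);
// where the fall-through path sees (access_flags & JVM_ACC_INTERFACE) == 0
// and the guard path (interface bit set) is added as a new edge on 'region'.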

//-------------------------inline_native_Class_query-------------------
bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
  const Type* return_type = TypeInt::BOOL;
  Node* prim_return_value = top();  // what happens if it's a primitive class?
  bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
  bool expect_prim = false;     // most of these guys expect to work on refs

  enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };

  Node* mirror = argument(0);
  Node* obj    = top();

  switch (id) {

  case vmIntrinsics::_getClassAccessFlags:
    p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
    query_value = make_load(nullptr, p, TypeInt::INT, T_INT, MemNode::unordered);
    break;

  default:
    fatal_unexpected_iid(id);
    break;
  }

  // Fall-through is the normal case of a query to a real class.
  phi->init_req(1, query_value);
  region->init_req(1, control());

  C->set_has_split_ifs(true); // Has chance for split-if optimization
  set_result(region, phi);
  return true;
}

//-------------------------inline_Class_cast-------------------
bool LibraryCallKit::inline_Class_cast() {
  Node* mirror = argument(0); // Class
  Node* obj    = argument(1);
  const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
  if (mirror_con == nullptr) {
    return false;  // dead path (mirror->is_top()).
  }
  if (obj == nullptr || obj->is_top()) {
    return false;  // dead path
  }
  const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();

  // First, see if Class.cast() can be folded statically.
  // java_mirror_type() returns non-null for compile-time Class constants.
  ciType* tm = mirror_con->java_mirror_type();
  if (tm != nullptr && tm->is_klass() &&
      tp != nullptr) {
    if (!tp->is_loaded()) {
      // Don't use intrinsic when class is not loaded.
        return false;
      }
    }
  }

  // Bail out of the intrinsic and do normal inlining if the exception path is frequent.
  if (too_many_traps(Deoptimization::Reason_intrinsic)) {
    return false;
  }

  // Generate dynamic checks.
  // Class.cast() is the Java-level implementation of the checkcast bytecode.
  // Do checkcast (Parse::do_checkcast()) optimizations here.

  mirror = null_check(mirror);
  // If mirror is dead, only null-path is taken.
  if (stopped()) {
    return true;
  }

  // Not-subtype or the mirror's klass ptr is null (in case it is a primitive).
  enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
  RegionNode* region = new RegionNode(PATH_LIMIT);
  record_for_igvn(region);

  // Now load the mirror's klass metaobject, and null-check it.
  // If kls is null, we have a primitive mirror and
  // nothing is an instance of a primitive type.
  Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);

  Node* res = top();
  if (!stopped()) {
    Node* bad_type_ctrl = top();
    // Do checkcast optimizations.
    res = gen_checkcast(obj, kls, &bad_type_ctrl);
    region->init_req(_bad_type_path, bad_type_ctrl);
  }
  if (region->in(_prim_path) != top() ||
      region->in(_bad_type_path) != top()) {
    // Let the Interpreter throw the ClassCastException.
    PreserveJVMState pjvms(this);
    set_control(_gvn.transform(region));
    uncommon_trap(Deoptimization::Reason_intrinsic,
                  Deoptimization::Action_maybe_recompile);
  }
  if (!stopped()) {
    set_result(res);
  }
  return true;
}


//--------------------------inline_native_subtype_check------------------------
// This intrinsic takes the JNI calls out of the heart of
// UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
bool LibraryCallKit::inline_native_subtype_check() {
  // Pull both arguments off the stack.
  Node* args[2];                // two java.lang.Class mirrors: superc, subc
  args[0] = argument(0);
  args[1] = argument(1);
  Node* klasses[2];             // corresponding Klasses: superk, subk
  klasses[0] = klasses[1] = top();

  enum {
    // A full decision tree on {superc is prim, subc is prim}:
    _prim_0_path = 1,           // {P,N} => false
                                // {P,P} & superc!=subc => false
    _prim_same_path,            // {P,P} & superc==subc => true
    _prim_1_path,               // {N,P} => false
    _ref_subtype_path,          // {N,N} & subtype check wins => true
    _both_ref_path,             // {N,N} & subtype check loses => false
    PATH_LIMIT
  };

  RegionNode* region = new RegionNode(PATH_LIMIT);
  Node*       phi    = new PhiNode(region, TypeInt::BOOL);
  record_for_igvn(region);

  const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
  const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
  int class_klass_offset = java_lang_Class::klass_offset();

  // First null-check both mirrors and load each mirror's klass metaobject.
  int which_arg;
  for (which_arg = 0; which_arg <= 1; which_arg++) {
    Node* arg = args[which_arg];
    arg = null_check(arg);
    if (stopped())  break;
    args[which_arg] = arg;

    Node* p = basic_plus_adr(arg, class_klass_offset);
    Node* kls = LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, adr_type, kls_type);
    klasses[which_arg] = _gvn.transform(kls);
  }

  // Having loaded both klasses, test each for null.
  bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
  for (which_arg = 0; which_arg <= 1; which_arg++) {
    Node* kls = klasses[which_arg];
    Node* null_ctl = top();
    kls = null_check_oop(kls, &null_ctl, never_see_null);
    int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
    region->init_req(prim_path, null_ctl);
    if (stopped())  break;
    klasses[which_arg] = kls;
  }

  if (!stopped()) {
    // now we have two reference types, in klasses[0..1]
    Node* subk   = klasses[1];  // the argument to isAssignableFrom
    Node* superk = klasses[0];  // the receiver
    region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
    // now we have a successful reference subtype check
    region->set_req(_ref_subtype_path, control());
  }

  // If both operands are primitive (both klasses null), then
  // we must return true when they are identical primitives.
  // It is convenient to test this after the first null klass check.
  set_control(region->in(_prim_0_path)); // go back to first null check
  if (!stopped()) {
    // Since superc is primitive, make a guard for the superc==subc case.
    Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
    Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
    generate_guard(bol_eq, region, PROB_FAIR);
    if (region->req() == PATH_LIMIT+1) {
      // A guard was added.  If the added guard is taken, superc==subc.
      region->swap_edges(PATH_LIMIT, _prim_same_path);
      region->del_req(PATH_LIMIT);
    }
    region->set_req(_prim_0_path, control()); // Not equal after all.
  }

  // these are the only paths that produce 'true':
  phi->set_req(_prim_same_path,   intcon(1));
  phi->set_req(_ref_subtype_path, intcon(1));

  // pull together the cases:
  assert(region->req() == PATH_LIMIT, "sane region");
  for (uint i = 1; i < region->req(); i++) {
    Node* ctl = region->in(i);
    if (ctl == nullptr || ctl == top()) {
      region->set_req(i, top());
      phi   ->set_req(i, top());
    } else if (phi->in(i) == nullptr) {
      phi->set_req(i, intcon(0)); // all other paths produce 'false'
    }
  }

  set_control(_gvn.transform(region));
  set_result(_gvn.transform(phi));
  return true;
}
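
// Worked examples of the decision tree above (a sketch; the receiver is
// args[0], i.e. superc):
//   int.class.isAssignableFrom(String.class)    -> _prim_0_path      -> false
//   int.class.isAssignableFrom(int.class)       -> _prim_same_path   -> true
//   String.class.isAssignableFrom(int.class)    -> _prim_1_path      -> false
//   Object.class.isAssignableFrom(String.class) -> _ref_subtype_path -> true
//   String.class.isAssignableFrom(Object.class) -> _both_ref_path    -> false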

//---------------------generate_array_guard_common------------------------
Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
                                                  bool obj_array, bool not_array) {

  if (stopped()) {
    return nullptr;
  }

  // If obj_array/not_array==false/false:
  // Branch around if the given klass is in fact an array (either obj or prim).
  // If obj_array/not_array==false/true:
  // Branch around if the given klass is not an array klass of any kind.
  // If obj_array/not_array==true/true:
  // Branch around if the kls is not an oop array (kls is int[], String, etc.)
  // If obj_array/not_array==true/false:
  // Branch around if the kls is an oop array (Object[] or subtype)
  //
  // Like generate_guard, adds a new path onto the region.
  jint  layout_con = 0;
  Node* layout_val = get_layout_helper(kls, layout_con);
  if (layout_val == nullptr) {
    bool query = (obj_array
                  ? Klass::layout_helper_is_objArray(layout_con)
                  : Klass::layout_helper_is_array(layout_con));
    if (query == not_array) {
      return nullptr;                       // never a branch
    } else {                                // always a branch
      Node* always_branch = control();
      if (region != nullptr)
        region->add_req(always_branch);
      set_control(top());
      return always_branch;
    }
  }
  // Now test the correct condition.
  jint  nval = (obj_array
                ? (jint)(Klass::_lh_array_tag_type_value
                         << Klass::_lh_array_tag_shift)
                : Klass::_lh_neutral_value);
  Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
  BoolTest::mask btest = BoolTest::lt;  // correct for testing is_[obj]array
  // invert the test if we are looking for a non-array
  if (not_array)  btest = BoolTest(btest).negate();
  Node* bol = _gvn.transform(new BoolNode(cmp, btest));
  return generate_fair_guard(bol, region);
}
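
// Sketch of the encoding relied on above (see Klass::layout_helper): array
// klasses have a negative layout helper with an array tag in the high bits,
// so an 'lt' compare against _lh_neutral_value separates arrays from
// instances, and an 'lt' compare against the type-array tag boundary
// separates oop arrays from primitive arrays.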


//-----------------------inline_native_newArray--------------------------
// private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
// private        native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
  Node* mirror;
  Node* count_val;
  if (uninitialized) {
    null_check_receiver();
    mirror    = argument(1);
    count_val = argument(2);
  } else {
    mirror    = argument(0);
    count_val = argument(1);
  }

  mirror = null_check(mirror);
  // If mirror or obj is dead, only null-path is taken.
  if (stopped())  return true;

  enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
  RegionNode* result_reg = new RegionNode(PATH_LIMIT);
  PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
  // the bytecode that invokes Arrays.copyOf if deoptimization happens.
  { PreserveReexecuteState preexecs(this);
    jvms()->set_should_reexecute(true);

    array_type_mirror = null_check(array_type_mirror);
    original          = null_check(original);

    // Check if a null path was taken unconditionally.
    if (stopped())  return true;

    Node* orig_length = load_array_length(original);

    Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
    klass_node = null_check(klass_node);

    RegionNode* bailout = new RegionNode(1);
    record_for_igvn(bailout);

    // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
    // Bail out if that is so.
    Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
    if (not_objArray != nullptr) {
      // Improve the klass node's type from the new optimistic assumption:
      ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
      const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
      Node* cast = new CastPPNode(klass_node, akls);
      cast->init_req(0, control());
      klass_node = _gvn.transform(cast);
    }

    // Bail out if either start or end is negative.
    generate_negative_guard(start, bailout, &start);
    generate_negative_guard(end,   bailout, &end);

    Node* length = end;
    if (_gvn.type(start) != TypeInt::ZERO) {
      length = _gvn.transform(new SubINode(end, start));
    }

    // Bail out if length is negative.
    // Without this check, new_array would throw a
    // NegativeArraySizeException, but an IllegalArgumentException is what
    // should be thrown.
    generate_negative_guard(length, bailout, &length);

    if (bailout->req() > 1) {
      PreserveJVMState pjvms(this);
      set_control(_gvn.transform(bailout));
      uncommon_trap(Deoptimization::Reason_intrinsic,
                    Deoptimization::Action_maybe_recompile);
    }

    if (!stopped()) {
      // How many elements will we copy from the original?
      // The answer is MinI(orig_length - start, length).
      Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
      Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);

      // Generate a direct call to the right arraycopy function(s).
      // We know the copy is disjoint but we might not know if the
      // oop stores need checking.
      // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
      // This will fail a store-check if x contains any non-nulls.

      // ArrayCopyNode:Ideal may transform the ArrayCopyNode to
      // to the copyOf to be validated, including that the copy to the
      // new array won't trigger an ArrayStoreException. That subtype
      // check can be optimized if we know something on the type of
      // the input array from type speculation.
      if (_gvn.type(klass_node)->singleton()) {
        const TypeKlassPtr* subk   = _gvn.type(load_object_klass(original))->is_klassptr();
        const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();

        int test = C->static_subtype_check(superk, subk);
        if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
          const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
          if (t_original->speculative_type() != nullptr) {
            original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
          }
        }
      }

      bool validated = false;
      // Reason_class_check rather than Reason_intrinsic because we
      // want to intrinsify even if this traps.
      if (!too_many_traps(Deoptimization::Reason_class_check)) {
        Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);

        if (not_subtype_ctrl != top()) {
          PreserveJVMState pjvms(this);
          set_control(not_subtype_ctrl);
          uncommon_trap(Deoptimization::Reason_class_check,
                        Deoptimization::Action_make_not_entrant);
          assert(stopped(), "Should be stopped");
        }
        validated = true;
      }

      if (!stopped()) {
        newcopy = new_array(klass_node, length, 0);  // no arguments to push

        ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
                                                load_object_klass(original), klass_node);
        if (!is_copyOfRange) {
          ac->set_copyof(validated);
        } else {

//-----------------------generate_method_call----------------------------
// Use generate_method_call to make a slow-call to the real
// method if the fast path fails.  An alternative would be to
// use a stub like OptoRuntime::slow_arraycopy_Java.
// This only works for expanding the current library call,
// not another intrinsic.  (E.g., don't use this for making an
// arraycopy call inside of the copyOf intrinsic.)
CallJavaNode*
LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
  // When compiling the intrinsic method itself, do not use this technique.
  guarantee(callee() != C->method(), "cannot make slow-call to self");

  ciMethod* method = callee();
  // ensure the JVMS we have will be correct for this call
  guarantee(method_id == method->intrinsic_id(), "must match");

  const TypeFunc* tf = TypeFunc::make(method);
  if (res_not_null) {
    assert(tf->return_type() == T_OBJECT, "");
    const TypeTuple* range = tf->range();
    const Type** fields = TypeTuple::fields(range->cnt());
    fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
    const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
    tf = TypeFunc::make(tf->domain(), new_range);
  }
  CallJavaNode* slow_call;
  if (is_static) {
    assert(!is_virtual, "");
    slow_call = new CallStaticJavaNode(C, tf,
                           SharedRuntime::get_resolve_static_call_stub(), method);
  } else if (is_virtual) {
    assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
    int vtable_index = Method::invalid_vtable_index;
    if (UseInlineCaches) {
      // Suppress the vtable call
    } else {
      // hashCode and clone are not miranda methods,
      // so the vtable index is fixed.
      // No need to use the linkResolver to get it.
      vtable_index = method->vtable_index();
      assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
             "bad index %d", vtable_index);
    }
    slow_call = new CallDynamicJavaNode(tf,
  set_edges_for_java_call(slow_call);
  return slow_call;
}


/**
 * Build special case code for calls to hashCode on an object. This call may
 * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
 * slightly different code.
 */
bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
  assert(is_static == callee()->is_static(), "correct intrinsic selection");
  assert(!(is_virtual && is_static), "either virtual, special, or static");

  enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };

  RegionNode* result_reg = new RegionNode(PATH_LIMIT);
  PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
  PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
  PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
  Node* obj = nullptr;
  if (!is_static) {
    // Check for hashing null object
    obj = null_check_receiver();
    if (stopped())  return true;  // unconditionally null
    result_reg->init_req(_null_path, top());
    result_val->init_req(_null_path, top());
  } else {
    // Do a null check, and return zero if null.
    // System.identityHashCode(null) == 0
    obj = argument(0);
    Node* null_ctl = top();
    obj = null_check_oop(obj, &null_ctl);
    result_reg->init_req(_null_path, null_ctl);
    result_val->init_req(_null_path, _gvn.intcon(0));
  }

  // Unconditionally null?  Then return right away.
  if (stopped()) {
    set_control( result_reg->in(_null_path));
    if (!stopped())
      set_result(result_val->in(_null_path));
    return true;
  }

  // We only go to the fast case code if we pass a number of guards.  The
  // paths which do not pass are accumulated in the slow_region.
  RegionNode* slow_region = new RegionNode(1);
  record_for_igvn(slow_region);

  // If this is a virtual call, we generate a funny guard.  We pull out
  // the vtable entry corresponding to hashCode() from the target object.
  // If the target method which we are calling happens to be the native
  // Object hashCode() method, we pass the guard.  We do not need this
  // guard for non-virtual calls -- the caller is known to be the native
  // Object hashCode().
  if (is_virtual) {
    // After null check, get the object's klass.
    Node* obj_klass = load_object_klass(obj);
    generate_virtual_guard(obj_klass, slow_region);
  }

  // Get the header out of the object, use LoadMarkNode when available
  Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
  // The control of the load must be null. Otherwise, the load can move before
  // the null check after castPP removal.
  Node* no_ctrl = nullptr;
  Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);

  // Test the header to see if it is unlocked.
  Node* lock_mask      = _gvn.MakeConX(markWord::lock_mask_in_place);
  Node* lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
  Node* unlocked_val   = _gvn.MakeConX(markWord::unlocked_value);
  Node* chk_unlocked   = _gvn.transform(new CmpXNode(lmasked_header, unlocked_val));
  Node* test_unlocked  = _gvn.transform(new BoolNode(chk_unlocked, BoolTest::ne));

  generate_slow_guard(test_unlocked, slow_region);

  // Get the hash value and check to see that it has been properly assigned.
  // We depend on hash_mask being at most 32 bits and avoid the use of
  // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
  // vm: see markWord.hpp.
  Node* hash_mask       = _gvn.intcon(markWord::hash_mask);
  Node* hash_shift      = _gvn.intcon(markWord::hash_shift);
  Node* hshifted_header = _gvn.transform(new URShiftXNode(header, hash_shift));
  // This hack lets the hash bits live anywhere in the mark object now, as long
  // as the shift drops the relevant bits into the low 32 bits.  Note that
  // Java spec says that HashCode is an int so there's no point in capturing
  // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
  hshifted_header       = ConvX2I(hshifted_header);
  Node* hash_val        = _gvn.transform(new AndINode(hshifted_header, hash_mask));
    // this->control() comes from set_results_for_java_call
    result_reg->init_req(_slow_path, control());
    result_val->init_req(_slow_path, slow_result);
    result_io  ->set_req(_slow_path, i_o());
    result_mem ->set_req(_slow_path, reset_memory());
  }

  // Return the combined state.
  set_i_o(        _gvn.transform(result_io));
  set_all_memory( _gvn.transform(result_mem));

  set_result(result_reg, result_val);
  return true;
}
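
// Fast-path sketch (hedged): with an unlocked mark word m, the inlined
// result is (int)(m >> hash_shift) & hash_mask; per the comments above, the
// value is also guarded against an unassigned hash (markWord::no_hash)
// before it is used, with all failing paths funneled into slow_region.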

//---------------------------inline_native_getClass----------------------------
// public final native Class<?> java.lang.Object.getClass();
//
// Build special case code for calls to getClass on an object.
bool LibraryCallKit::inline_native_getClass() {
  Node* obj = null_check_receiver();
  if (stopped())  return true;
  set_result(load_mirror_from_klass(load_object_klass(obj)));
  return true;
}

//-----------------inline_native_Reflection_getCallerClass---------------------
// public static native Class<?> sun.reflect.Reflection.getCallerClass();
//
// In the presence of deep enough inlining, getCallerClass() becomes a no-op.
//
// NOTE: This code must perform the same logic as JVM_GetCallerClass
// in that it must skip particular security frames and checks for
// caller sensitive methods.
bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
#ifndef PRODUCT
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
  }
#endif

    if (C->get_alias_index(src_type) == C->get_alias_index(dst_type)) {
      flags |= RC_NARROW_MEM; // narrow in memory
    }
  }

  // Call it.  Note that the length argument is not scaled.
  make_runtime_call(flags,
                    OptoRuntime::fast_arraycopy_Type(),
                    StubRoutines::unsafe_arraycopy(),
                    "unsafe_arraycopy",
                    dst_type,
                    src_addr, dst_addr, size XTOP);

  store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);

  return true;
}

#undef XTOP

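// Usage sketch (hedged): Unsafe.copyMemory lands on the runtime call above,
// a leaf call into the unsafe_arraycopy stub with an unscaled byte length;
// the trailing store resets the thread-local doing_unsafe_access flag to 0
// once the copy completes, matching the flag set before the call.
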
//------------------------copy_to_clone------------------------------------
// Helper function for inline_native_clone.
void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
  assert(obj_size != nullptr, "");
  Node* raw_obj = alloc_obj->in(1);
  assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");

  AllocateNode* alloc = nullptr;
  if (ReduceBulkZeroing) {
    // We will be completely responsible for initializing this object -
    // mark Initialize node as complete.
    alloc = AllocateNode::Ideal_allocation(alloc_obj);
    // The object was just allocated - there should be no stores!
    guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), "");
    // Mark as complete_with_arraycopy so that on AllocateNode
    // expansion, we know this AllocateNode is initialized by an array
    // copy and a StoreStore barrier exists after the array copy.
    alloc->initialization()->set_complete_with_arraycopy();
  }

// not cloneable or finalizer => slow path to out-of-line Object.clone
//
// The general case has two steps, allocation and copying.
// Allocation has two cases, and uses GraphKit::new_instance or new_array.
//
// Copying also has two cases, oop arrays and everything else.
// Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
// Everything else uses the tight inline loop supplied by CopyArrayNode.
//
// These steps fold up nicely if and when the cloned object's klass
// can be sharply typed as an object array, a type array, or an instance.
//
bool LibraryCallKit::inline_native_clone(bool is_virtual) {
  PhiNode* result_val;

  // Set the reexecute bit for the interpreter to reexecute
  // the bytecode that invokes Object.clone if deoptimization happens.
  { PreserveReexecuteState preexecs(this);
    jvms()->set_should_reexecute(true);

    Node* obj = null_check_receiver();
    if (stopped())  return true;

    const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();

    // If we are going to clone an instance, we need its exact type to
    // know the number and types of fields to convert the clone to
    // loads/stores. Maybe a speculative type can help us.
    if (!obj_type->klass_is_exact() &&
        obj_type->speculative_type() != nullptr &&
        obj_type->speculative_type()->is_instance_klass()) {
      ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
      if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
          !spec_ik->has_injected_fields()) {
        if (!obj_type->isa_instptr() ||
            obj_type->is_instptr()->instance_klass()->has_subklass()) {
          obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
        }
      }
    }

    // Conservatively insert a memory barrier on all memory slices.
    // Do not let writes into the original float below the clone.
    insert_mem_bar(Op_MemBarCPUOrder);

    // paths into result_reg:
    enum {
      _slow_path = 1,     // out-of-line call to clone method (virtual or not)
      _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
      _array_path,        // plain array allocation, plus arrayof_long_arraycopy
      _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
      PATH_LIMIT
    };
    RegionNode* result_reg = new RegionNode(PATH_LIMIT);
    result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
    PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
    PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
    record_for_igvn(result_reg);

    Node* obj_klass = load_object_klass(obj);
    Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr);
    if (array_ctl != nullptr) {
      // It's an array.
      PreserveJVMState pjvms(this);
      set_control(array_ctl);
      Node* obj_length = load_array_length(obj);
      Node* array_size = nullptr; // Size of the array without object alignment padding.
      Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);

      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
        // If it is an oop array, it requires very special treatment,
        // because gc barriers are required when accessing the array.
        Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
        if (is_obja != nullptr) {
          PreserveJVMState pjvms2(this);
          set_control(is_obja);
          // Generate a direct call to the right arraycopy function(s).
          // Clones are always tightly coupled.
          ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
          ac->set_clone_oop_array();
          Node* n = _gvn.transform(ac);
          assert(n == ac, "cannot disappear");
          ac->connect_outputs(this, /*deoptimize_on_exception=*/true);

          result_reg->init_req(_objArray_path, control());
          result_val->init_req(_objArray_path, alloc_obj);
          result_i_o ->set_req(_objArray_path, i_o());
          result_mem ->set_req(_objArray_path, reset_memory());
        }
      }
      // Otherwise, there are no barriers to worry about.
      // (We can dispense with card marks if we know the allocation
      //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
      //  causes the non-eden paths to take compensating steps to
      //  simulate a fresh allocation, so that no further
      //  card marks are required in compiled code to initialize
      //  the object.)

      if (!stopped()) {
        copy_to_clone(obj, alloc_obj, array_size, true);

        // Present the results of the copy.
        result_reg->init_req(_array_path, control());
        result_val->init_req(_array_path, alloc_obj);
        result_i_o ->set_req(_array_path, i_o());
        result_mem ->set_req(_array_path, reset_memory());
      }
    }

    // We only go to the instance fast case code if we pass a number of guards.
    // The paths which do not pass are accumulated in the slow_region.
    RegionNode* slow_region = new RegionNode(1);
    record_for_igvn(slow_region);
    if (!stopped()) {
      // It's an instance (we did array above).  Make the slow-path tests.
5077 // If this is a virtual call, we generate a funny guard. We grab
5078 // the vtable entry corresponding to clone() from the target object.
5079 // If the target method which we are calling happens to be the
5080 // Object clone() method, we pass the guard. We do not need this
5081 // guard for non-virtual calls; the caller is known to be the native
5082 // Object clone().
5083 if (is_virtual) {
5084 generate_virtual_guard(obj_klass, slow_region);
5085 }
5086
5087 // The object must be easily cloneable and must not have a finalizer.
5088 // Both of these conditions may be checked in a single test.
5089 // We could optimize the test further, but we don't care.
5090 generate_access_flags_guard(obj_klass,
5091 // Test both conditions:
5092 JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
5093 // Must be cloneable but not finalizer:
5094 JVM_ACC_IS_CLONEABLE_FAST,
5186 set_jvms(sfpt->jvms());
5187 _reexecute_sp = jvms()->sp();
5188
5189 return saved_jvms;
5190 }
5191 }
5192 }
5193 return nullptr;
5194 }
5195
5196 // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
5197 // such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
5198 SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
5199 JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
5200 uint size = alloc->req();
5201 SafePointNode* sfpt = new SafePointNode(size, old_jvms);
5202 old_jvms->set_map(sfpt);
5203 for (uint i = 0; i < size; i++) {
5204 sfpt->init_req(i, alloc->in(i));
5205 }
5206 // re-push array length for deoptimization
5207 sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), alloc->in(AllocateNode::ALength));
5208 old_jvms->set_sp(old_jvms->sp()+1);
5209 old_jvms->set_monoff(old_jvms->monoff()+1);
5210 old_jvms->set_scloff(old_jvms->scloff()+1);
5211 old_jvms->set_endoff(old_jvms->endoff()+1);
5212 old_jvms->set_should_reexecute(true);
5213
5214 sfpt->set_i_o(map()->i_o());
5215 sfpt->set_memory(map()->memory());
5216 sfpt->set_control(map()->control());
5217 return sfpt;
5218 }
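// A worked example of the bookkeeping above (illustrative): if the state
// before the allocation had sp == 2, re-pushing the array length at
// stkoff + sp yields sp == 3, and monoff/scloff/endoff each shift by one
// so that the monitor and scalar-replacement sections still begin after
// the (now one slot longer) expression stack.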
5219
5220 // In case of a deoptimization, we restart execution at the
5221 // allocation, allocating a new array. We would leave an uninitialized
5222 // array in the heap that GCs wouldn't expect. Move the allocation
5223 // after the traps so we don't allocate the array if we
5224 // deoptimize. This is possible because tightly_coupled_allocation()
5225 // guarantees there's no observer of the allocated array at this point
5226 // and the control flow is simple enough.
5227 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
5228 int saved_reexecute_sp, uint new_idx) {
5229 if (saved_jvms_before_guards != nullptr && !stopped()) {
5230 replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
5231
5232 assert(alloc != nullptr, "only with a tightly coupled allocation");
5233 // restore JVM state to the state at the arraycopy
5234 saved_jvms_before_guards->map()->set_control(map()->control());
5235 assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
5236 assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
5237 // If we've improved the types of some nodes (null check) while
5238 // emitting the guards, propagate them to the current state
5239 map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
5240 set_jvms(saved_jvms_before_guards);
5241 _reexecute_sp = saved_reexecute_sp;
5242
5243 // Remove the allocation from above the guards
5244 CallProjections callprojs;
5245 alloc->extract_projections(&callprojs, true);
5246 InitializeNode* init = alloc->initialization();
5247 Node* alloc_mem = alloc->in(TypeFunc::Memory);
5248 C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5249 C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
5250
5251 // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5252 // the allocation (i.e. is only valid if the allocation succeeds):
5253 // 1) replace CastIINode with AllocateArrayNode's length here
5254 // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
5255 //
5256 // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
5257     // a new, separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy).
5258 Node* init_control = init->proj_out(TypeFunc::Control);
5259 Node* alloc_length = alloc->Ideal_length();
5260 #ifdef ASSERT
5261 Node* prev_cast = nullptr;
5262 #endif
5263 for (uint i = 0; i < init_control->outcnt(); i++) {
5264 Node* init_out = init_control->raw_out(i);
5265 if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5266 #ifdef ASSERT
5267 if (prev_cast == nullptr) {
5268 prev_cast = init_out;
5270 if (prev_cast->cmp(*init_out) == false) {
5271 prev_cast->dump();
5272 init_out->dump();
5273 assert(false, "not equal CastIINode");
5274 }
5275 }
5276 #endif
5277 C->gvn_replace_by(init_out, alloc_length);
5278 }
5279 }
5280 C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5281
5282 // move the allocation here (after the guards)
5283 _gvn.hash_delete(alloc);
5284 alloc->set_req(TypeFunc::Control, control());
5285 alloc->set_req(TypeFunc::I_O, i_o());
5286 Node *mem = reset_memory();
5287 set_all_memory(mem);
5288 alloc->set_req(TypeFunc::Memory, mem);
5289 set_control(init->proj_out_or_null(TypeFunc::Control));
5290 set_i_o(callprojs.fallthrough_ioproj);
5291
5292 // Update memory as done in GraphKit::set_output_for_allocation()
5293 const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5294 const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5295 if (ary_type->isa_aryptr() && length_type != nullptr) {
5296 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5297 }
5298 const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5299 int elemidx = C->get_alias_index(telemref);
5300 set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
5301 set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
5302
5303 Node* allocx = _gvn.transform(alloc);
5304 assert(allocx == alloc, "where has the allocation gone?");
5305 assert(dest->is_CheckCastPP(), "not an allocation result?");
5306
5307 _gvn.hash_delete(dest);
5308 dest->set_req(0, control());
5309 Node* destx = _gvn.transform(dest);
5310 assert(destx == dest, "where has the allocation result gone?");
5572 top_src = src_type->isa_aryptr();
5573 has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
5574 src_spec = true;
5575 }
5576 if (!has_dest) {
5577 dest = maybe_cast_profiled_obj(dest, dest_k, true);
5578 dest_type = _gvn.type(dest);
5579 top_dest = dest_type->isa_aryptr();
5580 has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
5581 dest_spec = true;
5582 }
5583 }
5584 }
5585
5586 if (has_src && has_dest && can_emit_guards) {
5587 BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
5588 BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
5589 if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
5590 if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
5591
5592 if (src_elem == dest_elem && src_elem == T_OBJECT) {
5593 // If both arrays are object arrays then having the exact types
5594 // for both will remove the need for a subtype check at runtime
5595 // before the call and may make it possible to pick a faster copy
5596 // routine (without a subtype check on every element)
5597 // Do we have the exact type of src?
5598 bool could_have_src = src_spec;
5599 // Do we have the exact type of dest?
5600 bool could_have_dest = dest_spec;
5601 ciKlass* src_k = nullptr;
5602 ciKlass* dest_k = nullptr;
5603 if (!src_spec) {
5604 src_k = src_type->speculative_type_not_null();
5605 if (src_k != nullptr && src_k->is_array_klass()) {
5606 could_have_src = true;
5607 }
5608 }
5609 if (!dest_spec) {
5610 dest_k = dest_type->speculative_type_not_null();
5611 if (dest_k != nullptr && dest_k->is_array_klass()) {
5612 could_have_dest = true;
5613 }
5614 }
5615 if (could_have_src && could_have_dest) {
5616 // If we can have both exact types, emit the missing guards
5617 if (could_have_src && !src_spec) {
5618 src = maybe_cast_profiled_obj(src, src_k, true);
5619 }
5620 if (could_have_dest && !dest_spec) {
5621 dest = maybe_cast_profiled_obj(dest, dest_k, true);
5622 }
5623 }
5624 }
5625 }
5626
5627 ciMethod* trap_method = method();
5628 int trap_bci = bci();
5629 if (saved_jvms_before_guards != nullptr) {
5630 trap_method = alloc->jvms()->method();
5631 trap_bci = alloc->jvms()->bci();
5632 }
5633
5634 bool negative_length_guard_generated = false;
5635
5636 if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5637 can_emit_guards &&
5638 !src->is_top() && !dest->is_top()) {
5639     // validate arguments: enables transformation of the ArrayCopyNode
5640 validated = true;
5641
5642 RegionNode* slow_region = new RegionNode(1);
5643 record_for_igvn(slow_region);
5644
5645 // (1) src and dest are arrays.
5646 generate_non_array_guard(load_object_klass(src), slow_region);
5647 generate_non_array_guard(load_object_klass(dest), slow_region);
5648
5649 // (2) src and dest arrays must have elements of the same BasicType
5650 // done at macro expansion or at Ideal transformation time
5651
5652 // (4) src_offset must not be negative.
5653 generate_negative_guard(src_offset, slow_region);
5654
5655 // (5) dest_offset must not be negative.
5656 generate_negative_guard(dest_offset, slow_region);
5657
5658 // (7) src_offset + length must not exceed length of src.
5659     generate_limit_guard(src_offset, length,
5660                          load_array_length(src),
5661                          slow_region);
5662
5663 // (8) dest_offset + length must not exceed length of dest.
5664 generate_limit_guard(dest_offset, length,
5665 load_array_length(dest),
5666 slow_region);
5667
5668 // (6) length must not be negative.
5669 // This is also checked in generate_arraycopy() during macro expansion, but
5670 // we also have to check it here for the case where the ArrayCopyNode will
5671 // be eliminated by Escape Analysis.
5672 if (EliminateAllocations) {
5673 generate_negative_guard(length, slow_region);
5674 negative_length_guard_generated = true;
5675 }
5676
5677 // (9) each element of an oop array must be assignable
5678 Node* dest_klass = load_object_klass(dest);
5679 if (src != dest) {
5680 Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
5681
5682 if (not_subtype_ctrl != top()) {
5683 PreserveJVMState pjvms(this);
5684 set_control(not_subtype_ctrl);
5685 uncommon_trap(Deoptimization::Reason_intrinsic,
5686 Deoptimization::Action_make_not_entrant);
5687 assert(stopped(), "Should be stopped");
5688 }
5689 }
5690 {
5691 PreserveJVMState pjvms(this);
5692 set_control(_gvn.transform(slow_region));
5693 uncommon_trap(Deoptimization::Reason_intrinsic,
5694 Deoptimization::Action_make_not_entrant);
5695 assert(stopped(), "Should be stopped");
5696 }
5697
5698 const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
5699 const Type *toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
5700 src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
5701 arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
5702 }
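  // A Java-level view of the numbered guards above (illustrative): for
  //   System.arraycopy(src, srcPos, dest, destPos, length)
  // (4)/(5) catch srcPos < 0 and destPos < 0, (6) catches length < 0,
  // (7)/(8) catch srcPos + length > src.length and
  // destPos + length > dest.length, and (9) catches unassignable elements
  // for oop arrays; each failing check ends in an uncommon trap.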
5703
5704 if (stopped()) {
5705 return true;
5706 }
5707
5708 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
5709 // Create LoadRange and LoadKlass nodes for use during macro expansion here
5710 // so the compiler has a chance to eliminate them: during macro expansion,
5711 // we have to set their control (CastPP nodes are eliminated).
5712 load_object_klass(src), load_object_klass(dest),
5713 load_array_length(src), load_array_length(dest));
5714
5715 ac->set_arraycopy(validated);
5716
5717 Node* n = _gvn.transform(ac);
5718 if (n == ac) {
5719 ac->connect_outputs(this);
5720 } else {
329 case vmIntrinsics::_makePrivateBuffer: return inline_unsafe_make_private_buffer();
330 case vmIntrinsics::_finishPrivateBuffer: return inline_unsafe_finish_private_buffer();
331 case vmIntrinsics::_getReference: return inline_unsafe_access(!is_store, T_OBJECT, Relaxed, false);
332 case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_store, T_BOOLEAN, Relaxed, false);
333 case vmIntrinsics::_getByte: return inline_unsafe_access(!is_store, T_BYTE, Relaxed, false);
334 case vmIntrinsics::_getShort: return inline_unsafe_access(!is_store, T_SHORT, Relaxed, false);
335 case vmIntrinsics::_getChar: return inline_unsafe_access(!is_store, T_CHAR, Relaxed, false);
336 case vmIntrinsics::_getInt: return inline_unsafe_access(!is_store, T_INT, Relaxed, false);
337 case vmIntrinsics::_getLong: return inline_unsafe_access(!is_store, T_LONG, Relaxed, false);
338 case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_store, T_FLOAT, Relaxed, false);
339 case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_store, T_DOUBLE, Relaxed, false);
340 case vmIntrinsics::_getValue: return inline_unsafe_access(!is_store, T_OBJECT, Relaxed, false, true);
341
342 case vmIntrinsics::_putReference: return inline_unsafe_access( is_store, T_OBJECT, Relaxed, false);
343 case vmIntrinsics::_putBoolean: return inline_unsafe_access( is_store, T_BOOLEAN, Relaxed, false);
344 case vmIntrinsics::_putByte: return inline_unsafe_access( is_store, T_BYTE, Relaxed, false);
345 case vmIntrinsics::_putShort: return inline_unsafe_access( is_store, T_SHORT, Relaxed, false);
346 case vmIntrinsics::_putChar: return inline_unsafe_access( is_store, T_CHAR, Relaxed, false);
347 case vmIntrinsics::_putInt: return inline_unsafe_access( is_store, T_INT, Relaxed, false);
348 case vmIntrinsics::_putLong: return inline_unsafe_access( is_store, T_LONG, Relaxed, false);
349 case vmIntrinsics::_putFloat: return inline_unsafe_access( is_store, T_FLOAT, Relaxed, false);
350 case vmIntrinsics::_putDouble: return inline_unsafe_access( is_store, T_DOUBLE, Relaxed, false);
351 case vmIntrinsics::_putValue: return inline_unsafe_access( is_store, T_OBJECT, Relaxed, false, true);
352
353 case vmIntrinsics::_getReferenceVolatile: return inline_unsafe_access(!is_store, T_OBJECT, Volatile, false);
354 case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_store, T_BOOLEAN, Volatile, false);
355 case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_store, T_BYTE, Volatile, false);
356 case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_store, T_SHORT, Volatile, false);
357 case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_store, T_CHAR, Volatile, false);
358 case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_store, T_INT, Volatile, false);
359 case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_store, T_LONG, Volatile, false);
360 case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_store, T_FLOAT, Volatile, false);
361 case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_store, T_DOUBLE, Volatile, false);
362
363 case vmIntrinsics::_putReferenceVolatile: return inline_unsafe_access( is_store, T_OBJECT, Volatile, false);
364 case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access( is_store, T_BOOLEAN, Volatile, false);
365 case vmIntrinsics::_putByteVolatile: return inline_unsafe_access( is_store, T_BYTE, Volatile, false);
366 case vmIntrinsics::_putShortVolatile: return inline_unsafe_access( is_store, T_SHORT, Volatile, false);
367 case vmIntrinsics::_putCharVolatile: return inline_unsafe_access( is_store, T_CHAR, Volatile, false);
368 case vmIntrinsics::_putIntVolatile: return inline_unsafe_access( is_store, T_INT, Volatile, false);
369 case vmIntrinsics::_putLongVolatile: return inline_unsafe_access( is_store, T_LONG, Volatile, false);
370 case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access( is_store, T_FLOAT, Volatile, false);
371 case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access( is_store, T_DOUBLE, Volatile, false);
495 "notifyJvmtiEnd", false, true);
496 case vmIntrinsics::_notifyJvmtiVThreadMount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_mount()),
497 "notifyJvmtiMount", false, false);
498 case vmIntrinsics::_notifyJvmtiVThreadUnmount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_unmount()),
499 "notifyJvmtiUnmount", false, false);
500 case vmIntrinsics::_notifyJvmtiVThreadHideFrames: return inline_native_notify_jvmti_hide();
501 #endif
502
503 #ifdef JFR_HAVE_INTRINSICS
504 case vmIntrinsics::_counterTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JfrTime::time_function()), "counterTime");
505 case vmIntrinsics::_getEventWriter: return inline_native_getEventWriter();
506 case vmIntrinsics::_jvm_commit: return inline_native_jvm_commit();
507 #endif
508 case vmIntrinsics::_currentTimeMillis: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
509 case vmIntrinsics::_nanoTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
510 case vmIntrinsics::_writeback0: return inline_unsafe_writeback0();
511 case vmIntrinsics::_writebackPreSync0: return inline_unsafe_writebackSync0(true);
512 case vmIntrinsics::_writebackPostSync0: return inline_unsafe_writebackSync0(false);
513 case vmIntrinsics::_allocateInstance: return inline_unsafe_allocate();
514 case vmIntrinsics::_copyMemory: return inline_unsafe_copyMemory();
515 case vmIntrinsics::_isFlatArray: return inline_unsafe_isFlatArray();
516 case vmIntrinsics::_getLength: return inline_native_getLength();
517 case vmIntrinsics::_copyOf: return inline_array_copyOf(false);
518 case vmIntrinsics::_copyOfRange: return inline_array_copyOf(true);
519 case vmIntrinsics::_equalsB: return inline_array_equals(StrIntrinsicNode::LL);
520 case vmIntrinsics::_equalsC: return inline_array_equals(StrIntrinsicNode::UU);
521 case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex(T_INT);
522 case vmIntrinsics::_Preconditions_checkLongIndex: return inline_preconditions_checkIndex(T_LONG);
523 case vmIntrinsics::_clone: return inline_native_clone(intrinsic()->is_virtual());
524
525 case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
526 case vmIntrinsics::_newArray: return inline_unsafe_newArray(false);
527 case vmIntrinsics::_newNullRestrictedArray: return inline_newNullRestrictedArray();
528
529 case vmIntrinsics::_isAssignableFrom: return inline_native_subtype_check();
530
531 case vmIntrinsics::_isInstance:
532 case vmIntrinsics::_getModifiers:
533 case vmIntrinsics::_isInterface:
534 case vmIntrinsics::_isArray:
535 case vmIntrinsics::_isPrimitive:
536 case vmIntrinsics::_isHidden:
537 case vmIntrinsics::_getSuperclass:
538 case vmIntrinsics::_getClassAccessFlags: return inline_native_Class_query(intrinsic_id());
539
540 case vmIntrinsics::_floatToRawIntBits:
541 case vmIntrinsics::_floatToIntBits:
542 case vmIntrinsics::_intBitsToFloat:
543 case vmIntrinsics::_doubleToRawLongBits:
544 case vmIntrinsics::_doubleToLongBits:
545 case vmIntrinsics::_longBitsToDouble:
546 case vmIntrinsics::_floatToFloat16:
547 case vmIntrinsics::_float16ToFloat: return inline_fp_conversions(intrinsic_id());
2196 case vmIntrinsics::_remainderUnsigned_l: {
2197 zero_check_long(argument(2));
2198       // Compile-time detection of a zero divisor (ArithmeticException)
2199 if (stopped()) {
2200 return true; // keep the graph constructed so far
2201 }
2202 n = new UModLNode(control(), argument(0), argument(2));
2203 break;
2204 }
2205 default: fatal_unexpected_iid(id); break;
2206 }
2207 set_result(_gvn.transform(n));
2208 return true;
2209 }
2210
2211 //----------------------------inline_unsafe_access----------------------------
2212
2213 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2214 // Attempt to infer a sharper value type from the offset and base type.
2215 ciKlass* sharpened_klass = nullptr;
2216 bool null_free = false;
2217
2218 // See if it is an instance field, with an object type.
2219 if (alias_type->field() != nullptr) {
2220 if (alias_type->field()->type()->is_klass()) {
2221 sharpened_klass = alias_type->field()->type()->as_klass();
2222 null_free = alias_type->field()->is_null_free();
2223 }
2224 }
2225
2226 const TypeOopPtr* result = nullptr;
2227 // See if it is a narrow oop array.
2228 if (adr_type->isa_aryptr()) {
2229 if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2230 const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr();
2231 null_free = adr_type->is_aryptr()->is_null_free();
2232 if (elem_type != nullptr && elem_type->is_loaded()) {
2233 // Sharpen the value type.
2234 result = elem_type;
2235 }
2236 }
2237 }
2238
2239 // The sharpened class might be unloaded if there is no class loader
2240   // constraint in place.
2241 if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) {
2242 // Sharpen the value type.
2243 result = TypeOopPtr::make_from_klass(sharpened_klass);
2244 if (null_free) {
2245 result = result->join_speculative(TypePtr::NOTNULL)->is_oopptr();
2246 }
2247 }
2248 if (result != nullptr) {
2249 #ifndef PRODUCT
2250 if (C->print_intrinsics() || C->print_inlining()) {
2251 tty->print(" from base type: "); adr_type->dump(); tty->cr();
2252 tty->print(" sharpened value: "); result->dump(); tty->cr();
2253 }
2254 #endif
2255 }
2256 return result;
2257 }
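// Illustrative sharpening (hypothetical Java shapes; F_OFFSET is a
// made-up field-offset cookie): for
//   class Holder { String f; }
//   UNSAFE.getReference(holder, F_OFFSET)
// the value type can be narrowed from Object to String, and an element
// load from a String[] is narrowed to String likewise.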
2258
2259 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2260 switch (kind) {
2261 case Relaxed:
2262 return MO_UNORDERED;
2263 case Opaque:
2264 return MO_RELAXED;
2265 case Acquire:
2266 return MO_ACQUIRE;
2267 case Release:
2268 return MO_RELEASE;
2269 case Volatile:
2270 return MO_SEQ_CST;
2271 default:
2272 ShouldNotReachHere();
2273 return 0;
2274 }
2275 }
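// For orientation (illustrative): these kinds correspond to the Unsafe
// access families at the Java level, e.g. getInt -> Relaxed (MO_UNORDERED),
// getIntOpaque -> Opaque (MO_RELAXED), getIntAcquire -> Acquire (MO_ACQUIRE),
// putIntRelease -> Release (MO_RELEASE), getIntVolatile -> Volatile (MO_SEQ_CST).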
2276
2277 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned, const bool is_flat) {
2278 if (callee()->is_static()) return false; // caller must have the capability!
2279 DecoratorSet decorators = C2_UNSAFE_ACCESS;
2280 guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2281 guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2282 assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2283
2284 if (is_reference_type(type)) {
2285 decorators |= ON_UNKNOWN_OOP_REF;
2286 }
2287
2288 if (unaligned) {
2289 decorators |= C2_UNALIGNED;
2290 }
2291
2292 #ifndef PRODUCT
2293 {
2294 ResourceMark rm;
2295 // Check the signatures.
2296 ciSignature* sig = callee()->signature();
2297 #ifdef ASSERT
2298 if (!is_store) {
2299 // Object getReference(Object base, int/long offset), etc.
2300 BasicType rtype = sig->return_type()->basic_type();
2301 assert(rtype == type, "getter must return the expected value");
2302 assert(sig->count() == 2 || (is_flat && sig->count() == 3), "oop getter has 2 or 3 arguments");
2303 assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2304 assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2305 } else {
2306 // void putReference(Object base, int/long offset, Object x), etc.
2307 assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2308 assert(sig->count() == 3 || (is_flat && sig->count() == 4), "oop putter has 3 arguments");
2309 assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2310 assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2311 BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2312 assert(vtype == type, "putter must accept the expected value");
2313 }
2314 #endif // ASSERT
2315 }
2316 #endif //PRODUCT
2317
2318 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2319
2320 Node* receiver = argument(0); // type: oop
2321
2322 // Build address expression.
2323 Node* heap_base_oop = top();
2324
2325 // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2326 Node* base = argument(1); // type: oop
2327 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2328 Node* offset = argument(2); // type: long
2329 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2330 // to be plain byte offsets, which are also the same as those accepted
2331 // by oopDesc::field_addr.
2332 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2333 "fieldOffset must be byte-scaled");
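  // A hedged Java-level sketch of where such offset cookies come from
  // (Foo and its field 'x' are hypothetical):
  //
  //   long off = UNSAFE.objectFieldOffset(Foo.class.getDeclaredField("x"));
  //   int v    = UNSAFE.getInt(foo, off);   // 'off' is a plain byte offset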
2334
2335 ciInlineKlass* inline_klass = nullptr;
2336 if (is_flat) {
2337 const TypeInstPtr* cls = _gvn.type(argument(4))->isa_instptr();
2338 if (cls == nullptr || cls->const_oop() == nullptr) {
2339 return false;
2340 }
2341 ciType* mirror_type = cls->const_oop()->as_instance()->java_mirror_type();
2342 if (!mirror_type->is_inlinetype()) {
2343 return false;
2344 }
2345 inline_klass = mirror_type->as_inline_klass();
2346 }
2347
2348 if (base->is_InlineType()) {
2349 InlineTypeNode* vt = base->as_InlineType();
2350 if (is_store) {
2351 if (!vt->is_allocated(&_gvn)) {
2352 return false;
2353 }
2354 base = vt->get_oop();
2355 } else {
2356 if (offset->is_Con()) {
2357 long off = find_long_con(offset, 0);
2358 ciInlineKlass* vk = vt->type()->inline_klass();
2359 if ((long)(int)off != off || !vk->contains_field_offset(off)) {
2360 return false;
2361 }
2362
2363 ciField* field = vk->get_non_flat_field_by_offset(off);
2364 if (field != nullptr) {
2365 BasicType bt = type2field[field->type()->basic_type()];
2366 if (bt == T_ARRAY || bt == T_NARROWOOP) {
2367 bt = T_OBJECT;
2368 }
2369 if (bt == type && (!field->is_flat() || field->type() == inline_klass)) {
2370 Node* value = vt->field_value_by_offset(off, false);
2371 if (value->is_InlineType()) {
2372 value = value->as_InlineType()->adjust_scalarization_depth(this);
2373 }
2374 set_result(value);
2375 return true;
2376 }
2377 }
2378 }
2379 {
2380 // Re-execute the unsafe access if allocation triggers deoptimization.
2381 PreserveReexecuteState preexecs(this);
2382 jvms()->set_should_reexecute(true);
2383 vt = vt->buffer(this);
2384 }
2385 base = vt->get_oop();
2386 }
2387 }
2388
2389 // 32-bit machines ignore the high half!
2390 offset = ConvL2X(offset);
2391
2392 // Save state and restore on bailout
2393 uint old_sp = sp();
2394 SafePointNode* old_map = clone_map();
2395
2396 Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
2397
2398 if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
2399 if (type != T_OBJECT && (inline_klass == nullptr || !inline_klass->has_object_fields())) {
2400 decorators |= IN_NATIVE; // off-heap primitive access
2401 } else {
2402 set_map(old_map);
2403 set_sp(old_sp);
2404 return false; // off-heap oop accesses are not supported
2405 }
2406 } else {
2407 heap_base_oop = base; // on-heap or mixed access
2408 }
2409
2410 // Can base be null? Otherwise, always on-heap access.
2411 bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base));
2412
2413 if (!can_access_non_heap) {
2414 decorators |= IN_HEAP;
2415 }
2416
2417 Node* val = is_store ? argument(4 + (is_flat ? 1 : 0)) : nullptr;
2418
2419 const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
2420 if (adr_type == TypePtr::NULL_PTR) {
2421 set_map(old_map);
2422 set_sp(old_sp);
2423 return false; // off-heap access with zero address
2424 }
2425
2426 // Try to categorize the address.
2427 Compile::AliasType* alias_type = C->alias_type(adr_type);
2428 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2429
2430 if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2431 alias_type->adr_type() == TypeAryPtr::RANGE) {
2432 set_map(old_map);
2433 set_sp(old_sp);
2434 return false; // not supported
2435 }
2436
2437 bool mismatched = false;
2438 BasicType bt = T_ILLEGAL;
2439 ciField* field = nullptr;
2440 if (adr_type->isa_instptr()) {
2441 const TypeInstPtr* instptr = adr_type->is_instptr();
2442 ciInstanceKlass* k = instptr->instance_klass();
2443 int off = instptr->offset();
2444 if (instptr->const_oop() != nullptr &&
2445 k == ciEnv::current()->Class_klass() &&
2446 instptr->offset() >= (k->size_helper() * wordSize)) {
2447 k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
2448 field = k->get_field_by_offset(off, true);
2449 } else {
2450 field = k->get_non_flat_field_by_offset(off);
2451 }
2452 if (field != nullptr) {
2453 bt = type2field[field->type()->basic_type()];
2454 }
2455 assert(bt == alias_type->basic_type() || is_flat, "should match");
2456 } else {
2457 bt = alias_type->basic_type();
2458 }
2459
2460 if (bt != T_ILLEGAL) {
2461 assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2462 if (bt == T_BYTE && adr_type->isa_aryptr()) {
2463       // Alias type doesn't differentiate between byte[] and boolean[].
2464 // Use address type to get the element type.
2465 bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2466 }
2467 if (is_reference_type(bt, true)) {
2468 // accessing an array field with getReference is not a mismatch
2469 bt = T_OBJECT;
2470 }
2471 if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2472 // Don't intrinsify mismatched object accesses
2473 set_map(old_map);
2474 set_sp(old_sp);
2475 return false;
2476 }
2477 mismatched = (bt != type);
2478 } else if (alias_type->adr_type()->isa_oopptr()) {
2479 mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2480 }
2481
2482 if (is_flat) {
2483 if (adr_type->isa_instptr()) {
2484 if (field == nullptr || field->type() != inline_klass) {
2485 mismatched = true;
2486 }
2487 } else if (adr_type->isa_aryptr()) {
2488 const Type* elem = adr_type->is_aryptr()->elem();
2489 if (!adr_type->is_flat() || elem->inline_klass() != inline_klass) {
2490 mismatched = true;
2491 }
2492 } else {
2493 mismatched = true;
2494 }
2495 if (is_store) {
2496 const Type* val_t = _gvn.type(val);
2497 if (!val_t->is_inlinetypeptr() || val_t->inline_klass() != inline_klass) {
2498 set_map(old_map);
2499 set_sp(old_sp);
2500 return false;
2501 }
2502 }
2503 }
2504
2505 destruct_map_clone(old_map);
2506 assert(!mismatched || is_flat || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2507
2508 if (mismatched) {
2509 decorators |= C2_MISMATCHED;
2510 }
2511
2512 // First guess at the value type.
2513 const Type *value_type = Type::get_const_basic_type(type);
2514
2515 // Figure out the memory ordering.
2516 decorators |= mo_decorator_for_access_kind(kind);
2517
2518 if (!is_store) {
2519 if (type == T_OBJECT && !is_flat) {
2520 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2521 if (tjp != nullptr) {
2522 value_type = tjp;
2523 }
2524 }
2525 }
2526
2527 receiver = null_check(receiver);
2528 if (stopped()) {
2529 return true;
2530 }
2531 // Heap pointers get a null-check from the interpreter,
2532 // as a courtesy. However, this is not guaranteed by Unsafe,
2533 // and it is not possible to fully distinguish unintended nulls
2534 // from intended ones in this API.
2535
2536 if (!is_store) {
2537 Node* p = nullptr;
2538 // Try to constant fold a load from a constant field
2539
2540 if (heap_base_oop != top() && field != nullptr && field->is_constant() && !field->is_flat() && !mismatched) {
2541 // final or stable field
2542 p = make_constant_from_field(field, heap_base_oop);
2543 }
2544
2545 if (p == nullptr) { // Could not constant fold the load
2546 if (is_flat) {
2547 if (adr_type->isa_instptr() && !mismatched) {
2548 ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
2549 int offset = adr_type->is_instptr()->offset();
2550 p = InlineTypeNode::make_from_flat(this, inline_klass, base, base, holder, offset, decorators);
2551 } else {
2552 p = InlineTypeNode::make_from_flat(this, inline_klass, base, adr, nullptr, 0, decorators);
2553 }
2554 } else {
2555 p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2556 const TypeOopPtr* ptr = value_type->make_oopptr();
2557 if (ptr != nullptr && ptr->is_inlinetypeptr()) {
2558 // Load a non-flattened inline type from memory
2559 p = InlineTypeNode::make_from_oop(this, p, ptr->inline_klass(), !ptr->maybe_null());
2560 }
2561 }
2562 // Normalize the value returned by getBoolean in the following cases
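        // (illustrative: a mismatched read such as getBoolean over a byte
        //  slot holding 2 must still yield true, i.e. 1, so any non-zero
        //  loaded value is canonicalized to 1 by the IdealKit graph below)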
2563 if (type == T_BOOLEAN &&
2564 (mismatched ||
2565 heap_base_oop == top() || // - heap_base_oop is null or
2566 (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null
2567 // and the unsafe access is made to large offset
2568 // (i.e., larger than the maximum offset necessary for any
2569 // field access)
2570 ) {
2571 IdealKit ideal = IdealKit(this);
2572 #define __ ideal.
2573 IdealVariable normalized_result(ideal);
2574 __ declarations_done();
2575 __ set(normalized_result, p);
2576 __ if_then(p, BoolTest::ne, ideal.ConI(0));
2577 __ set(normalized_result, ideal.ConI(1));
2578 ideal.end_if();
2579 final_sync(ideal);
2580 p = __ value(normalized_result);
2581 #undef __
2582 }
2583 }
2584 if (type == T_ADDRESS) {
2585 p = gvn().transform(new CastP2XNode(nullptr, p));
2586 p = ConvX2UL(p);
2587 }
2588 // The load node has the control of the preceding MemBarCPUOrder. All
2589 // following nodes will have the control of the MemBarCPUOrder inserted at
2590 // the end of this method. So, pushing the load onto the stack at a later
2591 // point is fine.
2592 set_result(p);
2593 } else {
2594 if (bt == T_ADDRESS) {
2595 // Repackage the long as a pointer.
2596 val = ConvL2X(val);
2597 val = gvn().transform(new CastX2PNode(val));
2598 }
2599 if (is_flat) {
2600 if (adr_type->isa_instptr() && !mismatched) {
2601 ciInstanceKlass* holder = adr_type->is_instptr()->instance_klass();
2602 int offset = adr_type->is_instptr()->offset();
2603 val->as_InlineType()->store_flat(this, base, base, holder, offset, decorators);
2604 } else {
2605 val->as_InlineType()->store_flat(this, base, adr, nullptr, 0, decorators);
2606 }
2607 } else {
2608 access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2609 }
2610 }
2611
2612 if (argument(1)->is_InlineType() && is_store) {
2613 InlineTypeNode* value = InlineTypeNode::make_from_oop(this, base, _gvn.type(argument(1))->inline_klass());
2614 value = value->make_larval(this, false);
2615 replace_in_map(argument(1), value);
2616 }
2617
2618 return true;
2619 }
2620
2621 bool LibraryCallKit::inline_unsafe_make_private_buffer() {
2622 Node* receiver = argument(0);
2623 Node* value = argument(1);
2624 if (!value->is_InlineType()) {
2625 return false;
2626 }
2627
2628 receiver = null_check(receiver);
2629 if (stopped()) {
2630 return true;
2631 }
2632
2633 set_result(value->as_InlineType()->make_larval(this, true));
2634 return true;
2635 }
2636
2637 bool LibraryCallKit::inline_unsafe_finish_private_buffer() {
2638 Node* receiver = argument(0);
2639 Node* buffer = argument(1);
2640 if (!buffer->is_InlineType()) {
2641 return false;
2642 }
2643 InlineTypeNode* vt = buffer->as_InlineType();
2644 if (!vt->is_allocated(&_gvn)) {
2645 return false;
2646 }
2647 // TODO 8239003 Why is this needed?
2648 if (AllocateNode::Ideal_allocation(vt->get_oop()) == nullptr) {
2649 return false;
2650 }
2651
2652 receiver = null_check(receiver);
2653 if (stopped()) {
2654 return true;
2655 }
2656
2657 set_result(vt->finish_larval(this));
2658 return true;
2659 }
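// An illustrative pairing of the two intrinsics above (a sketch of the
// Valhalla-style Unsafe larval protocol; X_OFFSET is hypothetical):
//
//   MyValue v = ...;
//   v = UNSAFE.makePrivateBuffer(v);     // thaw into a private buffer
//   UNSAFE.putInt(v, X_OFFSET, 42);      // mutate the larval copy
//   v = UNSAFE.finishPrivateBuffer(v);   // freeze and publish the result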
2660
2661 //----------------------------inline_unsafe_load_store----------------------------
2662 // This method serves a couple of different customers (depending on LoadStoreKind):
2663 //
2664 // LS_cmp_swap:
2665 //
2666 // boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2667 // boolean compareAndSetInt( Object o, long offset, int expected, int x);
2668 // boolean compareAndSetLong( Object o, long offset, long expected, long x);
2669 //
2670 // LS_cmp_swap_weak:
2671 //
2672 // boolean weakCompareAndSetReference( Object o, long offset, Object expected, Object x);
2673 // boolean weakCompareAndSetReferencePlain( Object o, long offset, Object expected, Object x);
2674 // boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2675 // boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2676 //
2677 // boolean weakCompareAndSetInt( Object o, long offset, int expected, int x);
2846 }
2847 case LS_cmp_swap:
2848 case LS_cmp_swap_weak:
2849 case LS_get_add:
2850 break;
2851 default:
2852 ShouldNotReachHere();
2853 }
2854
2855 // Null check receiver.
2856 receiver = null_check(receiver);
2857 if (stopped()) {
2858 return true;
2859 }
2860
2861 int alias_idx = C->get_alias_index(adr_type);
2862
2863 if (is_reference_type(type)) {
2864 decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2865
2866 if (oldval != nullptr && oldval->is_InlineType()) {
2867 // Re-execute the unsafe access if allocation triggers deoptimization.
2868 PreserveReexecuteState preexecs(this);
2869 jvms()->set_should_reexecute(true);
2870 oldval = oldval->as_InlineType()->buffer(this)->get_oop();
2871 }
2872 if (newval != nullptr && newval->is_InlineType()) {
2873 // Re-execute the unsafe access if allocation triggers deoptimization.
2874 PreserveReexecuteState preexecs(this);
2875 jvms()->set_should_reexecute(true);
2876 newval = newval->as_InlineType()->buffer(this)->get_oop();
2877 }
2878
2879 // Transformation of a value which could be null pointer (CastPP #null)
2880 // could be delayed during Parse (for example, in adjust_map_after_if()).
2881 // Execute transformation here to avoid barrier generation in such case.
2882 if (_gvn.type(newval) == TypePtr::NULL_PTR)
2883 newval = _gvn.makecon(TypePtr::NULL_PTR);
2884
2885 if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2886 // Refine the value to a null constant, when it is known to be null
2887 oldval = _gvn.makecon(TypePtr::NULL_PTR);
2888 }
2889 }
2890
2891 Node* result = nullptr;
2892 switch (kind) {
2893 case LS_cmp_exchange: {
2894 result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2895 oldval, newval, value_type, type, decorators);
2896 break;
2897 }
2898 case LS_cmp_swap_weak:
3045 Deoptimization::Action_make_not_entrant);
3046 }
3047 if (stopped()) {
3048 return true;
3049 }
3050 #endif //INCLUDE_JVMTI
3051
3052 Node* test = nullptr;
3053 if (LibraryCallKit::klass_needs_init_guard(kls)) {
3054 // Note: The argument might still be an illegal value like
3055 // Serializable.class or Object[].class. The runtime will handle it.
3056 // But we must make an explicit check for initialization.
3057 Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
3058 // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
3059 // can generate code to load it as unsigned byte.
3060 Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
3061 Node* bits = intcon(InstanceKlass::fully_initialized);
3062 test = _gvn.transform(new SubINode(inst, bits));
3063 // The 'test' is non-zero if we need to take a slow path.
3064 }
3065 Node* obj = nullptr;
3066 const TypeInstKlassPtr* tkls = _gvn.type(kls)->isa_instklassptr();
3067 if (tkls != nullptr && tkls->instance_klass()->is_inlinetype()) {
3068 obj = InlineTypeNode::make_default(_gvn, tkls->instance_klass()->as_inline_klass())->buffer(this);
3069 } else {
3070 obj = new_instance(kls, test);
3071 }
3072 set_result(obj);
3073 return true;
3074 }
3075
3076 //------------------------inline_native_time_funcs--------------
3077 // inline code for System.currentTimeMillis() and System.nanoTime()
3078 // these have the same type and signature
3079 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3080 const TypeFunc* tf = OptoRuntime::void_long_Type();
3081 const TypePtr* no_memory_effects = nullptr;
3082 Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3083 Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
3084 #ifdef ASSERT
3085 Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
3086 assert(value_top == top(), "second value must be top");
3087 #endif
3088 set_result(value);
3089 return true;
3090 }
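// For example, System.nanoTime() compiles down to a single RC_LEAF call
// to os::javaTimeNanos with no memory effects; the long result is taken
// from the projection at TypeFunc::Parms+0 above.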
3091
3804
3805 //------------------------inline_native_setCurrentThread------------------
3806 bool LibraryCallKit::inline_native_setCurrentThread() {
3807 assert(C->method()->changes_current_thread(),
3808 "method changes current Thread but is not annotated ChangesCurrentThread");
3809 Node* arr = argument(1);
3810 Node* thread = _gvn.transform(new ThreadLocalNode());
3811 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset()));
3812 Node* thread_obj_handle
3813 = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered);
3814 thread_obj_handle = _gvn.transform(thread_obj_handle);
3815 const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr();
3816 access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED);
3817 JFR_ONLY(extend_setCurrentThread(thread, arr);)
3818 return true;
3819 }
3820
3821 const Type* LibraryCallKit::scopedValueCache_type() {
3822 ciKlass* objects_klass = ciObjArrayKlass::make(env()->Object_klass());
3823 const TypeOopPtr* etype = TypeOopPtr::make_from_klass(env()->Object_klass());
3824 const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS, /* stable= */ false, /* flat= */ false, /* not_flat= */ true, /* not_null_free= */ true);
3825
3826 // Because we create the scopedValue cache lazily we have to make the
3827 // type of the result BotPTR.
3828 bool xk = etype->klass_is_exact();
3829 const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, TypeAryPtr::Offset(0));
3830 return objects_type;
3831 }
3832
3833 Node* LibraryCallKit::scopedValueCache_helper() {
3834 Node* thread = _gvn.transform(new ThreadLocalNode());
3835 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedValueCache_offset()));
3836 // We cannot use immutable_memory() because we might flip onto a
3837 // different carrier thread, at which point we'll need to use that
3838 // carrier thread's cache.
3839 // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
3840 // TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered));
3841 return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered);
3842 }
3843
3844 //------------------------inline_native_scopedValueCache------------------
3845 bool LibraryCallKit::inline_native_scopedValueCache() {
3846 Node* cache_obj_handle = scopedValueCache_helper();
3847 const Type* objects_type = scopedValueCache_type();
3848 set_result(access_load(cache_obj_handle, objects_type, T_OBJECT, IN_NATIVE));
3849
3850 return true;
3851 }
3852
3853 //------------------------inline_native_setScopedValueCache------------------
3854 bool LibraryCallKit::inline_native_setScopedValueCache() {
3855 Node* arr = argument(0);
3856 Node* cache_obj_handle = scopedValueCache_helper();
3857 const Type* objects_type = scopedValueCache_type();
3858
3859 const TypePtr *adr_type = _gvn.type(cache_obj_handle)->isa_ptr();
3860 access_store_at(nullptr, cache_obj_handle, adr_type, arr, objects_type, T_OBJECT, IN_NATIVE | MO_UNORDERED);
3861
3862 return true;
3863 }
3864
3865 //-----------------------load_klass_from_mirror_common-------------------------
3866 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3867 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3868 // and branch to the given path on the region.
3869 // If never_see_null, take an uncommon trap on null, so we can optimistically
3870 // compile for the non-null case.
3871 // If the region is null, force never_see_null = true.
3872 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3873 bool never_see_null,
3874 RegionNode* region,
3875 int null_path,
3876 int offset) {
3877 if (region == nullptr) never_see_null = true;
3878 Node* p = basic_plus_adr(mirror, offset);
3879 const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
3880 Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3881 Node* null_ctl = top();
3882 kls = null_check_oop(kls, &null_ctl, never_see_null);
3883 if (region != nullptr) {
3884     // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
3885     region->init_req(null_path, null_ctl);
3886   } else {
3887     assert(null_ctl == top(), "no loose ends");
3888 }
3889 return kls;
3890 }
3891
3892 //--------------------(inline_native_Class_query helpers)---------------------
3893 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
3894 // Fall through if (mods & mask) == bits, take the guard otherwise.
3895 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3896 // Branch around if the given klass has the given modifier bit set.
3897 // Like generate_guard, adds a new path onto the region.
3898 Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3899 Node* mods = make_load(nullptr, modp, TypeInt::INT, T_INT, MemNode::unordered);
3900 Node* mask = intcon(modifier_mask);
3901 Node* bits = intcon(modifier_bits);
3902 Node* mbit = _gvn.transform(new AndINode(mods, mask));
3903 Node* cmp = _gvn.transform(new CmpINode(mbit, bits));
3904 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3905 return generate_fair_guard(bol, region);
3906 }
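// Illustrative use (see inline_native_clone above): with
//   mask = JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER and
//   bits = JVM_ACC_IS_CLONEABLE_FAST
// we fall through only for classes that are cloneable-fast and have no
// finalizer; any other combination takes the guard onto the given region.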
3907
3908 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3909 return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3910 }
3911 Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region) {
3912 return generate_access_flags_guard(kls, JVM_ACC_IS_HIDDEN_CLASS, 0, region);
3913 }
3914
3915 //-------------------------inline_native_Class_query-------------------
3916 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3917 const Type* return_type = TypeInt::BOOL;
3918 Node* prim_return_value = top(); // what happens if it's a primitive class?
3919 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3920 bool expect_prim = false; // most of these guys expect to work on refs
3921
3922 enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3923
3924 Node* mirror = argument(0);
3925 Node* obj = top();
3926
3927 switch (id) {
4081
4082 case vmIntrinsics::_getClassAccessFlags:
4083 p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
4084 query_value = make_load(nullptr, p, TypeInt::INT, T_INT, MemNode::unordered);
4085 break;
4086
4087 default:
4088 fatal_unexpected_iid(id);
4089 break;
4090 }
4091
4092 // Fall-through is the normal case of a query to a real class.
4093 phi->init_req(1, query_value);
4094 region->init_req(1, control());
4095
4096 C->set_has_split_ifs(true); // Has chance for split-if optimization
4097 set_result(region, phi);
4098 return true;
4099 }
4100
4101
4102 //-------------------------inline_Class_cast-------------------
4103 bool LibraryCallKit::inline_Class_cast() {
4104 Node* mirror = argument(0); // Class
4105 Node* obj = argument(1);
4106 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
4107 if (mirror_con == nullptr) {
4108 return false; // dead path (mirror->is_top()).
4109 }
4110 if (obj == nullptr || obj->is_top()) {
4111 return false; // dead path
4112 }
4113 const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
4114
4115 // First, see if Class.cast() can be folded statically.
4116 // java_mirror_type() returns non-null for compile-time Class constants.
4117 ciType* tm = mirror_con->java_mirror_type();
4118 if (tm != nullptr && tm->is_klass() &&
4119 tp != nullptr) {
4120 if (!tp->is_loaded()) {
4121 // Don't use intrinsic when class is not loaded.
4133 return false;
4134 }
4135 }
4136 }
4137
4138 // Bailout intrinsic and do normal inlining if exception path is frequent.
4139 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
4140 return false;
4141 }
4142
4143 // Generate dynamic checks.
4144   // Class.cast() is the Java implementation of the _checkcast bytecode.
4145 // Do checkcast (Parse::do_checkcast()) optimizations here.
4146
4147 mirror = null_check(mirror);
4148 // If mirror is dead, only null-path is taken.
4149 if (stopped()) {
4150 return true;
4151 }
4152
4153 // Not-subtype or the mirror's klass ptr is nullptr (in case it is a primitive).
4154 enum { _bad_type_path = 1, _prim_path = 2, _npe_path = 3, PATH_LIMIT };
4155 RegionNode* region = new RegionNode(PATH_LIMIT);
4156 record_for_igvn(region);
4157
4158 // Now load the mirror's klass metaobject, and null-check it.
4159 // If kls is null, we have a primitive mirror and
4160 // nothing is an instance of a primitive type.
4161 Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
4162
4163 Node* res = top();
4164 Node* io = i_o();
4165 Node* mem = merged_memory();
4166 if (!stopped()) {
4167
4168 Node* bad_type_ctrl = top();
4169 // Do checkcast optimizations.
4170 res = gen_checkcast(obj, kls, &bad_type_ctrl);
4171 region->init_req(_bad_type_path, bad_type_ctrl);
4172 }
4173 if (region->in(_prim_path) != top() ||
4174 region->in(_bad_type_path) != top() ||
4175 region->in(_npe_path) != top()) {
4176 // Let Interpreter throw ClassCastException.
4177 PreserveJVMState pjvms(this);
4178 set_control(_gvn.transform(region));
4179 // Set IO and memory because gen_checkcast may override them when buffering inline types
4180 set_i_o(io);
4181 set_all_memory(mem);
4182 uncommon_trap(Deoptimization::Reason_intrinsic,
4183 Deoptimization::Action_maybe_recompile);
4184 }
4185 if (!stopped()) {
4186 set_result(res);
4187 }
4188 return true;
4189 }
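
// For reference, a hedged sketch of the Java code being intrinsified here,
// approximately java.lang.Class.cast as found in the JDK (details may vary
// by release):
//
//   public T cast(Object obj) {
//       if (obj != null && !isInstance(obj))
//           throw new ClassCastException(cannotCastMsg(obj));
//       return (T) obj;
//   }
//
// The region above collects the failing paths (bad type, primitive mirror,
// null where it is not allowed) and defers to the interpreter through the
// uncommon trap, which then throws the exception.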
4190
4191
4192 //--------------------------inline_native_subtype_check------------------------
4193 // This intrinsic takes the JNI calls out of the heart of
4194 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
4195 bool LibraryCallKit::inline_native_subtype_check() {
4196 // Pull both arguments off the stack.
4197 Node* args[2]; // two java.lang.Class mirrors: superc, subc
4198 args[0] = argument(0);
4199 args[1] = argument(1);
4200 Node* klasses[2]; // corresponding Klasses: superk, subk
4201 klasses[0] = klasses[1] = top();
4202
4203 enum {
4204 // A full decision tree on {superc is prim, subc is prim}:
4205 _prim_0_path = 1, // {P,N} => false
4206 // {P,P} & superc!=subc => false
4207 _prim_same_path, // {P,P} & superc==subc => true
4208 _prim_1_path, // {N,P} => false
4209 _ref_subtype_path, // {N,N} & subtype check wins => true
4210 _both_ref_path, // {N,N} & subtype check loses => false
4211 PATH_LIMIT
4212 };
4213
4214 RegionNode* region = new RegionNode(PATH_LIMIT);
4215 RegionNode* prim_region = new RegionNode(2);
4216 Node* phi = new PhiNode(region, TypeInt::BOOL);
4217 record_for_igvn(region);
4218 record_for_igvn(prim_region);
4219
4220 const TypePtr* adr_type = TypeRawPtr::BOTTOM; // memory type of loads
4221 const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL;
4222 int class_klass_offset = java_lang_Class::klass_offset();
4223
4224 // First null-check both mirrors and load each mirror's klass metaobject.
4225 int which_arg;
4226 for (which_arg = 0; which_arg <= 1; which_arg++) {
4227 Node* arg = args[which_arg];
4228 arg = null_check(arg);
4229 if (stopped()) break;
4230 args[which_arg] = arg;
4231
4232 Node* p = basic_plus_adr(arg, class_klass_offset);
4233 Node* kls = LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, adr_type, kls_type);
4234 klasses[which_arg] = _gvn.transform(kls);
4235 }
4236
4237 // Having loaded both klasses, test each for null.
4238 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
4239 for (which_arg = 0; which_arg <= 1; which_arg++) {
4240 Node* kls = klasses[which_arg];
4241 Node* null_ctl = top();
4242 kls = null_check_oop(kls, &null_ctl, never_see_null);
4243 if (which_arg == 0) {
4244 prim_region->init_req(1, null_ctl);
4245 } else {
4246 region->init_req(_prim_1_path, null_ctl);
4247 }
4248 if (stopped()) break;
4249 klasses[which_arg] = kls;
4250 }
4251
4252 if (!stopped()) {
4253 // now we have two reference types, in klasses[0..1]
4254 Node* subk = klasses[1]; // the argument to isAssignableFrom
4255 Node* superk = klasses[0]; // the receiver
4256 region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
4257 region->set_req(_ref_subtype_path, control());
4258 }
4259
4260 // If both operands are primitive (both klasses null), then
4261 // we must return true when they are identical primitives.
4262 // It is convenient to test this after the first null klass check.
4263 // This path is also used if superc is a value mirror.
4264 set_control(_gvn.transform(prim_region));
4265 if (!stopped()) {
4266 // Since superc is primitive, make a guard for the superc==subc case.
4267 Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
4268 Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
4269 generate_fair_guard(bol_eq, region);
4270 if (region->req() == PATH_LIMIT+1) {
4271 // A guard was added. If the added guard is taken, superc==subc.
4272 region->swap_edges(PATH_LIMIT, _prim_same_path);
4273 region->del_req(PATH_LIMIT);
4274 }
4275 region->set_req(_prim_0_path, control()); // Not equal after all.
4276 }
4277
4278 // these are the only paths that produce 'true':
4279 phi->set_req(_prim_same_path, intcon(1));
4280 phi->set_req(_ref_subtype_path, intcon(1));
4281
4282 // pull together the cases:
4283 assert(region->req() == PATH_LIMIT, "sane region");
4284 for (uint i = 1; i < region->req(); i++) {
4285 Node* ctl = region->in(i);
4286 if (ctl == nullptr || ctl == top()) {
4287 region->set_req(i, top());
4288 phi ->set_req(i, top());
4289 } else if (phi->in(i) == nullptr) {
4290 phi->set_req(i, intcon(0)); // all other paths produce 'false'
4291 }
4292 }
4293
4294 set_control(_gvn.transform(region));
4295 set_result(_gvn.transform(phi));
4296 return true;
4297 }
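
// A hedged sketch of the semantics compiled above, with the receiver as
// superc and the argument as subc (Class.isAssignableFrom is native; this
// pseudo-Java only illustrates the decision tree):
//
//   static boolean isAssignableFrom(Class<?> superc, Class<?> subc) {
//       Objects.requireNonNull(superc);               // mirror null checks
//       Objects.requireNonNull(subc);
//       if (superc.isPrimitive() || subc.isPrimitive())
//           return superc == subc;                    // _prim_same_path vs. _prim_0/_prim_1
//       return subtypeCheck(subc, superc);            // _ref_subtype_path vs. _both_ref_path
//   }
//
// where subtypeCheck stands for the VM subtype test emitted by
// gen_subtype_check().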
4298
4299 //---------------------generate_array_guard_common------------------------
4300 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind) {
4301
4302 if (stopped()) {
4303 return nullptr;
4304 }
4305
4306 // Like generate_guard, adds a new path onto the region.
4307 jint layout_con = 0;
4308 Node* layout_val = get_layout_helper(kls, layout_con);
4309 if (layout_val == nullptr) {
4310 bool query = false;
4311 switch(kind) {
4312 case ObjectArray: query = Klass::layout_helper_is_objArray(layout_con); break;
4313 case NonObjectArray: query = !Klass::layout_helper_is_objArray(layout_con); break;
4314 case TypeArray: query = Klass::layout_helper_is_typeArray(layout_con); break;
4315 case AnyArray: query = Klass::layout_helper_is_array(layout_con); break;
4316 case NonArray: query = !Klass::layout_helper_is_array(layout_con); break;
4317 default:
4318 ShouldNotReachHere();
4319 }
4320 if (!query) {
4321 return nullptr; // never a branch
4322 } else { // always a branch
4323 Node* always_branch = control();
4324 if (region != nullptr)
4325 region->add_req(always_branch);
4326 set_control(top());
4327 return always_branch;
4328 }
4329 }
4330 unsigned int value = 0;
4331 BoolTest::mask btest = BoolTest::illegal;
4332 switch(kind) {
4333 case ObjectArray:
4334 case NonObjectArray: {
4335 value = Klass::_lh_array_tag_obj_value;
4336 layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4337 btest = (kind == ObjectArray) ? BoolTest::eq : BoolTest::ne;
4338 break;
4339 }
4340 case TypeArray: {
4341 value = Klass::_lh_array_tag_type_value;
4342 layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
4343 btest = BoolTest::eq;
4344 break;
4345 }
4346 case AnyArray: value = Klass::_lh_neutral_value; btest = BoolTest::lt; break;
4347 case NonArray: value = Klass::_lh_neutral_value; btest = BoolTest::gt; break;
4348 default:
4349 ShouldNotReachHere();
4350 }
4351 // Now test the correct condition.
4352 jint nval = (jint)value;
4353 Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
4354 Node* bol = _gvn.transform(new BoolNode(cmp, btest));
4355 return generate_fair_guard(bol, region);
4356 }
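
// Background for the guard above (hedged summary of the layout helper
// encoding in klass.hpp): the layout helper is negative for arrays, with an
// array tag in the high bits, and positive for instances, so the tests
// reduce to:
//
//   lh <  _lh_neutral_value                          // AnyArray
//   lh >  _lh_neutral_value                          // NonArray
//   (lh >> _lh_array_tag_shift) == tag_obj_value     // ObjectArray
//   (lh >> _lh_array_tag_shift) == tag_type_value    // TypeArray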
4357
4358 //-----------------------inline_newNullRestrictedArray--------------------------
4359 // public static native Object[] newNullRestrictedArray(Class<?> componentType, int length);
4360 bool LibraryCallKit::inline_newNullRestrictedArray() {
4361 Node* componentType = argument(0);
4362 Node* length = argument(1);
4363
4364 const TypeInstPtr* tp = _gvn.type(componentType)->isa_instptr();
4365 if (tp != nullptr) {
4366 ciInstanceKlass* ik = tp->instance_klass();
4367 if (ik == C->env()->Class_klass()) {
4368 ciType* t = tp->java_mirror_type();
4369 if (t != nullptr && t->is_inlinetype()) {
4370 ciArrayKlass* array_klass = ciArrayKlass::make(t, true);
4371 if (array_klass->is_loaded() && array_klass->element_klass()->as_inline_klass()->is_initialized()) {
4372 const TypeAryKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass, Type::trust_interfaces)->is_aryklassptr();
4373 array_klass_type = array_klass_type->cast_to_null_free();
4374 Node* obj = new_array(makecon(array_klass_type), length, 0, nullptr, false); // no arguments to push
4375 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(obj);
4376 alloc->set_null_free();
4377 set_result(obj);
4378 assert(gvn().type(obj)->is_aryptr()->is_null_free(), "must be null-free");
4379 return true;
4380 }
4381 }
4382 }
4383 }
4384 return false;
4385 }
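
// Hypothetical usage sketch (the exact Valhalla entry point may differ):
//
//   Object[] pts = ValueClass.newNullRestrictedArray(Point.class, 16);
//
// The intrinsic only fires when the component type is a compile-time
// constant, loaded, initialized inline class; otherwise we return false
// above and the native implementation handles the call.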
4386
4387 //-----------------------inline_native_newArray--------------------------
4388 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
4389 // private native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
4390 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
4391 Node* mirror;
4392 Node* count_val;
4393 if (uninitialized) {
4394 null_check_receiver();
4395 mirror = argument(1);
4396 count_val = argument(2);
4397 } else {
4398 mirror = argument(0);
4399 count_val = argument(1);
4400 }
4401
4402 mirror = null_check(mirror);
4403 // If the mirror is dead, only the null path is taken.
4404 if (stopped()) return true;
4405
4406 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
4407 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4408 PhiNode* result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4514 // the bytecode that invokes Arrays.copyOf if deoptimization happens.
4515 { PreserveReexecuteState preexecs(this);
4516 jvms()->set_should_reexecute(true);
4517
4518 array_type_mirror = null_check(array_type_mirror);
4519 original = null_check(original);
4520
4521 // Check if a null path was taken unconditionally.
4522 if (stopped()) return true;
4523
4524 Node* orig_length = load_array_length(original);
4525
4526 Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0);
4527 klass_node = null_check(klass_node);
4528
4529 RegionNode* bailout = new RegionNode(1);
4530 record_for_igvn(bailout);
4531
4532 // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
4533 // Bail out if that is so.
4534 // An inline type array may have an object field that would require a
4535 // write barrier. Conservatively, go to the slow path.
4536 // TODO 8251971: Optimize for the case when flat src/dst are later found
4537 // to not contain oops (i.e., move this check to the macro expansion phase).
4538 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4539 const TypeAryPtr* orig_t = _gvn.type(original)->isa_aryptr();
4540 const TypeKlassPtr* tklass = _gvn.type(klass_node)->is_klassptr();
4541 bool exclude_flat = UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, false, false, BarrierSetC2::Parsing) &&
4542 // Can src array be flat and contain oops?
4543 (orig_t == nullptr || (!orig_t->is_not_flat() && (!orig_t->is_flat() || orig_t->elem()->inline_klass()->contains_oops()))) &&
4544 // Can dest array be flat and contain oops?
4545 tklass->can_be_inline_array() && (!tklass->is_flat() || tklass->is_aryklassptr()->elem()->is_instklassptr()->instance_klass()->as_inline_klass()->contains_oops());
4546 Node* not_objArray = exclude_flat ? generate_non_objArray_guard(klass_node, bailout) : generate_typeArray_guard(klass_node, bailout);
4547 if (not_objArray != nullptr) {
4548 // Improve the klass node's type from the new optimistic assumption:
4549 ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
4550 const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0));
4551 Node* cast = new CastPPNode(klass_node, akls);
4552 cast->init_req(0, control());
4553 klass_node = _gvn.transform(cast);
4554 }
4555
4556 // Bail out if either start or end is negative.
4557 generate_negative_guard(start, bailout, &start);
4558 generate_negative_guard(end, bailout, &end);
4559
4560 Node* length = end;
4561 if (_gvn.type(start) != TypeInt::ZERO) {
4562 length = _gvn.transform(new SubINode(end, start));
4563 }
4564
4565 // Bail out if length is negative.
4566 // Without this, new_array would throw
4567 // NegativeArraySizeException, but IllegalArgumentException is what
4568 // should be thrown.
4569 generate_negative_guard(length, bailout, &length);
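
// For example (hedged): Arrays.copyOfRange(a, 7, 3) must throw
// IllegalArgumentException because from > to; letting new_array see the
// negative length 3 - 7 would raise NegativeArraySizeException instead,
// hence the bailout to the interpreter.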
4570
4571 // Handle inline type arrays
4572 bool can_validate = !too_many_traps(Deoptimization::Reason_class_check);
4573 if (!stopped()) {
4574 orig_t = _gvn.type(original)->isa_aryptr();
4575 if (orig_t != nullptr && orig_t->is_flat()) {
4576 // Src is flat, check that dest is flat as well
4577 if (exclude_flat) {
4578 // Dest can't be flat, bail out
4579 bailout->add_req(control());
4580 set_control(top());
4581 } else {
4582 generate_fair_guard(flat_array_test(klass_node, /* flat = */ false), bailout);
4583 }
4584 } else if (UseFlatArray && (orig_t == nullptr || !orig_t->is_not_flat()) &&
4585 // If dest is flat, src must be flat as well (guaranteed by src <: dest check if validated).
4586 ((!tklass->is_flat() && tklass->can_be_inline_array()) || !can_validate)) {
4587 // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
4588 // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
4589 generate_fair_guard(flat_array_test(load_object_klass(original)), bailout);
4590 if (orig_t != nullptr) {
4591 orig_t = orig_t->cast_to_not_flat();
4592 original = _gvn.transform(new CheckCastPPNode(control(), original, orig_t));
4593 }
4594 }
4595 if (!can_validate) {
4596 // No validation. The subtype check emitted at macro expansion time will not go to the slow
4597 // path but will call checkcast_arraycopy, which cannot handle flat/null-free inline type arrays.
4598 // TODO 8251971: Optimize for the case when src/dest are later found to be both flat/null-free.
4599 generate_fair_guard(null_free_array_test(klass_node), bailout);
4600 }
4601 }
4602
4603 if (bailout->req() > 1) {
4604 PreserveJVMState pjvms(this);
4605 set_control(_gvn.transform(bailout));
4606 uncommon_trap(Deoptimization::Reason_intrinsic,
4607 Deoptimization::Action_maybe_recompile);
4608 }
4609
4610 if (!stopped()) {
4611 // How many elements will we copy from the original?
4612 // The answer is MinI(orig_length - start, length).
4613 Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
4614 Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
4615
4616 // Generate a direct call to the right arraycopy function(s).
4617 // We know the copy is disjoint but we might not know if the
4618 // oop stores need checking.
4619 // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class).
4620 // This will fail a store-check if x contains any non-nulls.
4621
4622 // ArrayCopyNode::Ideal may transform the ArrayCopyNode to
4625 // to the copyOf to be validated, including that the copy to the
4626 // new array won't trigger an ArrayStoreException. That subtype
4627 // check can be optimized if we know something on the type of
4628 // the input array from type speculation.
4629 if (_gvn.type(klass_node)->singleton()) {
4630 const TypeKlassPtr* subk = _gvn.type(load_object_klass(original))->is_klassptr();
4631 const TypeKlassPtr* superk = _gvn.type(klass_node)->is_klassptr();
4632
4633 int test = C->static_subtype_check(superk, subk);
4634 if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
4635 const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
4636 if (t_original->speculative_type() != nullptr) {
4637 original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
4638 }
4639 }
4640 }
4641
4642 bool validated = false;
4643 // Reason_class_check rather than Reason_intrinsic because we
4644 // want to intrinsify even if this traps.
4645 if (can_validate) {
4646 Node* not_subtype_ctrl = gen_subtype_check(original, klass_node);
4647
4648 if (not_subtype_ctrl != top()) {
4649 PreserveJVMState pjvms(this);
4650 set_control(not_subtype_ctrl);
4651 uncommon_trap(Deoptimization::Reason_class_check,
4652 Deoptimization::Action_make_not_entrant);
4653 assert(stopped(), "Should be stopped");
4654 }
4655 validated = true;
4656 }
4657
4658 if (!stopped()) {
4659 newcopy = new_array(klass_node, length, 0); // no arguments to push
4660
4661 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
4662 load_object_klass(original), klass_node);
4663 if (!is_copyOfRange) {
4664 ac->set_copyof(validated);
4665 } else {
4711
4712 //-----------------------generate_method_call----------------------------
4713 // Use generate_method_call to make a slow-call to the real
4714 // method if the fast path fails. An alternative would be to
4715 // use a stub like OptoRuntime::slow_arraycopy_Java.
4716 // This only works for expanding the current library call,
4717 // not another intrinsic. (E.g., don't use this for making an
4718 // arraycopy call inside of the copyOf intrinsic.)
4719 CallJavaNode*
4720 LibraryCallKit::generate_method_call(vmIntrinsicID method_id, bool is_virtual, bool is_static, bool res_not_null) {
4721 // When compiling the intrinsic method itself, do not use this technique.
4722 guarantee(callee() != C->method(), "cannot make slow-call to self");
4723
4724 ciMethod* method = callee();
4725 // ensure the JVMS we have will be correct for this call
4726 guarantee(method_id == method->intrinsic_id(), "must match");
4727
4728 const TypeFunc* tf = TypeFunc::make(method);
4729 if (res_not_null) {
4730 assert(tf->return_type() == T_OBJECT, "");
4731 const TypeTuple* range = tf->range_cc();
4732 const Type** fields = TypeTuple::fields(range->cnt());
4733 fields[TypeFunc::Parms] = range->field_at(TypeFunc::Parms)->filter_speculative(TypePtr::NOTNULL);
4734 const TypeTuple* new_range = TypeTuple::make(range->cnt(), fields);
4735 tf = TypeFunc::make(tf->domain_cc(), new_range);
4736 }
4737 CallJavaNode* slow_call;
4738 if (is_static) {
4739 assert(!is_virtual, "");
4740 slow_call = new CallStaticJavaNode(C, tf,
4741 SharedRuntime::get_resolve_static_call_stub(), method);
4742 } else if (is_virtual) {
4743 assert(!gvn().type(argument(0))->maybe_null(), "should not be null");
4744 int vtable_index = Method::invalid_vtable_index;
4745 if (UseInlineCaches) {
4746 // Suppress the vtable call
4747 } else {
4748 // hashCode and clone are not miranda methods,
4749 // so the vtable index is fixed.
4750 // No need to use the linkResolver to get it.
4751 vtable_index = method->vtable_index();
4752 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4753 "bad index %d", vtable_index);
4754 }
4755 slow_call = new CallDynamicJavaNode(tf,
4772 set_edges_for_java_call(slow_call);
4773 return slow_call;
4774 }
4775
4776
4777 /**
4778 * Build special case code for calls to hashCode on an object. This call may
4779 * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4780 * slightly different code.
4781 */
4782 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4783 assert(is_static == callee()->is_static(), "correct intrinsic selection");
4784 assert(!(is_virtual && is_static), "either virtual, special, or static");
4785
4786 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4787
4788 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4789 PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT);
4790 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
4791 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4792 Node* obj = argument(0);
4793
4794 if (gvn().type(obj)->is_inlinetypeptr()) {
4795 return false;
4796 }
4797
4798 if (!is_static) {
4799 // Check for hashing a null object.
4800 obj = null_check_receiver();
4801 if (stopped()) return true; // unconditionally null
4802 result_reg->init_req(_null_path, top());
4803 result_val->init_req(_null_path, top());
4804 } else {
4805 // Do a null check, and return zero if null.
4806 // System.identityHashCode(null) == 0
4807 Node* null_ctl = top();
4808 obj = null_check_oop(obj, &null_ctl);
4809 result_reg->init_req(_null_path, null_ctl);
4810 result_val->init_req(_null_path, _gvn.intcon(0));
4811 }
4812
4813 // Unconditionally null? Then return right away.
4814 if (stopped()) {
4815 set_control( result_reg->in(_null_path));
4816 if (!stopped())
4817 set_result(result_val->in(_null_path));
4818 return true;
4819 }
4820
4821 // We only go to the fast case code if we pass a number of guards. The
4822 // paths which do not pass are accumulated in the slow_region.
4823 RegionNode* slow_region = new RegionNode(1);
4824 record_for_igvn(slow_region);
4825
4826 // If this is a virtual call, we generate a funny guard. We pull out
4827 // the vtable entry corresponding to hashCode() from the target object.
4828 // If the target method which we are calling happens to be the native
4829 // Object hashCode() method, we pass the guard. We do not need this
4830 // guard for non-virtual calls -- the caller is known to be the native
4831 // Object hashCode().
4832 if (is_virtual) {
4833 // After null check, get the object's klass.
4834 Node* obj_klass = load_object_klass(obj);
4835 generate_virtual_guard(obj_klass, slow_region);
4836 }
4837
4838 // Get the header out of the object, use LoadMarkNode when available
4839 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4840 // The control of the load must be null. Otherwise, the load can move before
4841 // the null check after castPP removal.
4842 Node* no_ctrl = nullptr;
4843 Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4844
4845 // Test the header to see if it is unlocked.
4846 // This also serves as a guard against inline types.
4847 Node *lock_mask = _gvn.MakeConX(markWord::inline_type_mask_in_place);
4848 Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4849 Node *unlocked_val = _gvn.MakeConX(markWord::unlocked_value);
4850 Node *chk_unlocked = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
4851 Node *test_unlocked = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
4852
4853 generate_slow_guard(test_unlocked, slow_region);
4854
4855 // Get the hash value and check to see that it has been properly assigned.
4856 // We depend on hash_mask being at most 32 bits and avoid the use of
4857 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4858 // vm: see markWord.hpp.
4859 Node *hash_mask = _gvn.intcon(markWord::hash_mask);
4860 Node *hash_shift = _gvn.intcon(markWord::hash_shift);
4861 Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
4862 // This hack lets the hash bits live anywhere in the mark object now, as long
4863 // as the shift drops the relevant bits into the low 32 bits. Note that
4864 // the Java spec says that hashCode is an int, so there's no point in capturing
4865 // an 'X'-sized hashcode (32 in a 32-bit build or 64 in a 64-bit build).
4866 hshifted_header = ConvX2I(hshifted_header);
4867 Node *hash_val = _gvn.transform(new AndINode(hshifted_header, hash_mask));
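
// In effect (hedged sketch of the extraction above, pseudo-Java):
//
//   int hash = (int)(mark >>> markWord.hash_shift) & markWord.hash_mask;
//
// i.e. shift the hash field into the low bits, truncate to 32 bits, then
// mask. A result equal to markWord::no_hash means the hash has not been
// assigned yet and the slow path must be taken.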
4893 // this->control() comes from set_results_for_java_call
4894 result_reg->init_req(_slow_path, control());
4895 result_val->init_req(_slow_path, slow_result);
4896 result_io ->set_req(_slow_path, i_o());
4897 result_mem ->set_req(_slow_path, reset_memory());
4898 }
4899
4900 // Return the combined state.
4901 set_i_o( _gvn.transform(result_io) );
4902 set_all_memory( _gvn.transform(result_mem));
4903
4904 set_result(result_reg, result_val);
4905 return true;
4906 }
4907
4908 //---------------------------inline_native_getClass----------------------------
4909 // public final native Class<?> java.lang.Object.getClass();
4910 //
4911 // Build special case code for calls to getClass on an object.
4912 bool LibraryCallKit::inline_native_getClass() {
4913 Node* obj = argument(0);
4914 if (obj->is_InlineType()) {
4915 const Type* t = _gvn.type(obj);
4916 if (t->maybe_null()) {
4917 null_check(obj);
4918 }
4919 set_result(makecon(TypeInstPtr::make(t->inline_klass()->java_mirror())));
4920 return true;
4921 }
4922 obj = null_check_receiver();
4923 if (stopped()) return true;
4924 set_result(load_mirror_from_klass(load_object_klass(obj)));
4925 return true;
4926 }
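
// For inline types the mirror is a compile-time constant (hedged example):
//
//   Point p = ...;                 // statically known inline type
//   Class<?> c = p.getClass();     // folds to the constant Point.class
//
// with only a null check remaining when the declared type is nullable.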
4927
4928 //-----------------inline_native_Reflection_getCallerClass---------------------
4929 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4930 //
4931 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4932 //
4933 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4934 // in that it must skip particular security frames and checks for
4935 // caller sensitive methods.
4936 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4937 #ifndef PRODUCT
4938 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4939 tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4940 }
4941 #endif
4942
5203 if (C->get_alias_index(src_type) == C->get_alias_index(dst_type)) {
5204 flags |= RC_NARROW_MEM; // narrow in memory
5205 }
5206 }
5207
5208 // Call it. Note that the length argument is not scaled.
5209 make_runtime_call(flags,
5210 OptoRuntime::fast_arraycopy_Type(),
5211 StubRoutines::unsafe_arraycopy(),
5212 "unsafe_arraycopy",
5213 dst_type,
5214 src_addr, dst_addr, size XTOP);
5215
5216 store_to_memory(control(), doing_unsafe_access_addr, intcon(0), doing_unsafe_access_bt, Compile::AliasIdxRaw, MemNode::unordered);
5217
5218 return true;
5219 }
5220
5221 #undef XTOP
5222
5223 // TODO 8325106 Remove this and corresponding tests. Flatness is not a property of the Class anymore with JEP 401.
5224 //----------------------inline_unsafe_isFlatArray------------------------
5225 // public native boolean Unsafe.isFlatArray(Class<?> arrayClass);
5226 // This intrinsic exploits assumptions made by the native implementation
5227 // (arrayClass is neither null nor primitive) to avoid unnecessary null checks.
5228 bool LibraryCallKit::inline_unsafe_isFlatArray() {
5229 Node* cls = argument(1);
5230 Node* p = basic_plus_adr(cls, java_lang_Class::klass_offset());
5231 Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p,
5232 TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT));
5233 Node* result = flat_array_test(kls);
5234 set_result(result);
5235 return true;
5236 }
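
// Hypothetical usage sketch:
//
//   boolean flat = UNSAFE.isFlatArray(Point[].class);
//
// compiles to a single flat_array_test on the loaded klass; the null and
// primitive checks are omitted because the native method's contract already
// excludes those inputs.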
5237
5238 //------------------------copy_to_clone-----------------------------------
5239 // Helper function for inline_native_clone.
5240 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
5241 assert(obj_size != nullptr, "");
5242 Node* raw_obj = alloc_obj->in(1);
5243 assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
5244
5245 AllocateNode* alloc = nullptr;
5246 if (ReduceBulkZeroing) {
5247 // We will be completely responsible for initializing this object -
5248 // mark Initialize node as complete.
5249 alloc = AllocateNode::Ideal_allocation(alloc_obj);
5250 // The object was just allocated - there should not be any stores!
5251 guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), "");
5252 // Mark as complete_with_arraycopy so that on AllocateNode
5253 // expansion, we know this AllocateNode is initialized by an array
5254 // copy and a StoreStore barrier exists after the array copy.
5255 alloc->initialization()->set_complete_with_arraycopy();
5256 }
5257
5282 // not cloneable or finalizer => slow path to out-of-line Object.clone
5283 //
5284 // The general case has two steps, allocation and copying.
5285 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
5286 //
5287 // Copying also has two cases, oop arrays and everything else.
5288 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
5289 // Everything else uses the tight inline loop supplied by CopyArrayNode.
5290 //
5291 // These steps fold up nicely if and when the cloned object's klass
5292 // can be sharply typed as an object array, a type array, or an instance.
5293 //
5294 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
5295 PhiNode* result_val;
5296
5297 // Set the reexecute bit for the interpreter to reexecute
5298 // the bytecode that invokes Object.clone if deoptimization happens.
5299 { PreserveReexecuteState preexecs(this);
5300 jvms()->set_should_reexecute(true);
5301
5302 Node* obj = argument(0);
5303 obj = null_check_receiver();
5304 if (stopped()) return true;
5305
5306 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
5307
5308 // If we are going to clone an instance, we need its exact type to
5309 // know the number and types of fields to convert the clone to
5310 // loads/stores. Maybe a speculative type can help us.
5311 if (!obj_type->klass_is_exact() &&
5312 obj_type->speculative_type() != nullptr &&
5313 obj_type->speculative_type()->is_instance_klass() &&
5314 !obj_type->speculative_type()->is_inlinetype()) {
5315 ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
5316 if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
5317 !spec_ik->has_injected_fields()) {
5318 if (!obj_type->isa_instptr() ||
5319 obj_type->is_instptr()->instance_klass()->has_subklass()) {
5320 obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
5321 }
5322 }
5323 }
5324
5325 // Conservatively insert a memory barrier on all memory slices.
5326 // Do not let writes into the original float below the clone.
5327 insert_mem_bar(Op_MemBarCPUOrder);
5328
5329 // paths into result_reg:
5330 enum {
5331 _slow_path = 1, // out-of-line call to clone method (virtual or not)
5332 _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy
5333 _array_path, // plain array allocation, plus arrayof_long_arraycopy
5334 _instance_path, // plain instance allocation, plus arrayof_long_arraycopy
5335 PATH_LIMIT
5336 };
5337 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
5338 result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
5339 PhiNode* result_i_o = new PhiNode(result_reg, Type::ABIO);
5340 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
5341 record_for_igvn(result_reg);
5342
5343 Node* obj_klass = load_object_klass(obj);
5344 // We only go to the fast case code if we pass a number of guards.
5345 // The paths which do not pass are accumulated in the slow_region.
5346 RegionNode* slow_region = new RegionNode(1);
5347 record_for_igvn(slow_region);
5348
5349 Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr);
5350 if (array_ctl != nullptr) {
5351 // It's an array.
5352 PreserveJVMState pjvms(this);
5353 set_control(array_ctl);
5354
5355 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5356 const TypeAryPtr* ary_ptr = obj_type->isa_aryptr();
5357 if (UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Expansion) &&
5358 obj_type->can_be_inline_array() &&
5359 (ary_ptr == nullptr || (!ary_ptr->is_not_flat() && (!ary_ptr->is_flat() || ary_ptr->elem()->inline_klass()->contains_oops())))) {
5360 // A flat inline type array may have an object field that would require a
5361 // write barrier. Conservatively, go to the slow path.
5362 generate_fair_guard(flat_array_test(obj_klass), slow_region);
5363 }
5364
5365 if (!stopped()) {
5366 Node* obj_length = load_array_length(obj);
5367 Node* array_size = nullptr; // Size of the array without object alignment padding.
5368 Node* alloc_obj = new_array(obj_klass, obj_length, 0, &array_size, /*deoptimize_on_exception=*/true);
5369
5370 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
5371 if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) {
5372 // If it is an oop array, it requires very special treatment,
5373 // because gc barriers are required when accessing the array.
5374 Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr);
5375 if (is_obja != nullptr) {
5376 PreserveJVMState pjvms2(this);
5377 set_control(is_obja);
5378 // Generate a direct call to the right arraycopy function(s).
5379 // Clones are always tightly coupled.
5380 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, true, false);
5381 ac->set_clone_oop_array();
5382 Node* n = _gvn.transform(ac);
5383 assert(n == ac, "cannot disappear");
5384 ac->connect_outputs(this, /*deoptimize_on_exception=*/true);
5385
5386 result_reg->init_req(_objArray_path, control());
5387 result_val->init_req(_objArray_path, alloc_obj);
5388 result_i_o ->set_req(_objArray_path, i_o());
5389 result_mem ->set_req(_objArray_path, reset_memory());
5390 }
5391 }
5392 // Otherwise, there are no barriers to worry about.
5393 // (We can dispense with card marks if we know the allocation
5394 // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks
5395 // causes the non-eden paths to take compensating steps to
5396 // simulate a fresh allocation, so that no further
5397 // card marks are required in compiled code to initialize
5398 // the object.)
5399
5400 if (!stopped()) {
5401 copy_to_clone(obj, alloc_obj, array_size, true);
5402
5403 // Present the results of the copy.
5404 result_reg->init_req(_array_path, control());
5405 result_val->init_req(_array_path, alloc_obj);
5406 result_i_o ->set_req(_array_path, i_o());
5407 result_mem ->set_req(_array_path, reset_memory());
5408 }
5409 }
5410 }
5411
5412 if (!stopped()) {
5413 // It's an instance (we did array above). Make the slow-path tests.
5414 // If this is a virtual call, we generate a funny guard. We grab
5415 // the vtable entry corresponding to clone() from the target object.
5416 // If the target method which we are calling happens to be the
5417 // Object clone() method, we pass the guard. We do not need this
5418 // guard for non-virtual calls; the caller is known to be the native
5419 // Object clone().
5420 if (is_virtual) {
5421 generate_virtual_guard(obj_klass, slow_region);
5422 }
5423
5424 // The object must be easily cloneable and must not have a finalizer.
5425 // Both of these conditions may be checked in a single test.
5426 // We could optimize the test further, but we don't care.
5427 generate_access_flags_guard(obj_klass,
5428 // Test both conditions:
5429 JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
5430 // Must be cloneable but not finalizer:
5431 JVM_ACC_IS_CLONEABLE_FAST,
5523 set_jvms(sfpt->jvms());
5524 _reexecute_sp = jvms()->sp();
5525
5526 return saved_jvms;
5527 }
5528 }
5529 }
5530 return nullptr;
5531 }
5532
5533 // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
5534 // such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
5535 SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
5536 JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
5537 uint size = alloc->req();
5538 SafePointNode* sfpt = new SafePointNode(size, old_jvms);
5539 old_jvms->set_map(sfpt);
5540 for (uint i = 0; i < size; i++) {
5541 sfpt->init_req(i, alloc->in(i));
5542 }
5543 int adjustment = 1;
5544 // TODO 8325106 why can't we check via the type of the const klass node?
5545 if (alloc->is_null_free()) {
5546 // A null-free, tightly coupled array allocation can only come from LibraryCallKit::inline_newNullRestrictedArray
5547 // which requires both the component type and the array length on stack for re-execution. Re-create and push
5548 // the component type.
5549 ciArrayKlass* klass = alloc->in(AllocateNode::KlassNode)->bottom_type()->is_aryklassptr()->exact_klass()->as_array_klass();
5550 ciInstance* instance = klass->component_mirror_instance();
5551 const TypeInstPtr* t_instance = TypeInstPtr::make(instance);
5552 sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), makecon(t_instance));
5553 adjustment++;
5554 }
5555 // re-push array length for deoptimization
5556 sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp() + adjustment - 1, alloc->in(AllocateNode::ALength));
5557 old_jvms->set_sp(old_jvms->sp() + adjustment);
5558 old_jvms->set_monoff(old_jvms->monoff() + adjustment);
5559 old_jvms->set_scloff(old_jvms->scloff() + adjustment);
5560 old_jvms->set_endoff(old_jvms->endoff() + adjustment);
5561 old_jvms->set_should_reexecute(true);
5562
5563 sfpt->set_i_o(map()->i_o());
5564 sfpt->set_memory(map()->memory());
5565 sfpt->set_control(map()->control());
5566 return sfpt;
5567 }
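
// A hedged sketch of the interpreter stack re-created above for
// re-execution of the allocation bytecode:
//
//   [..., componentType, length]   // null-free: newNullRestrictedArray(componentType, length)
//   [..., length]                  // ordinary array allocation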
5568
5569 // In case of a deoptimization, we restart execution at the
5570 // allocation, allocating a new array. We would leave an uninitialized
5571 // array in the heap that GCs wouldn't expect. Move the allocation
5572 // after the traps so we don't allocate the array if we
5573 // deoptimize. This is possible because tightly_coupled_allocation()
5574 // guarantees there's no observer of the allocated array at this point
5575 // and the control flow is simple enough.
5576 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
5577 int saved_reexecute_sp, uint new_idx) {
5578 if (saved_jvms_before_guards != nullptr && !stopped()) {
5579 replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
5580
5581 assert(alloc != nullptr, "only with a tightly coupled allocation");
5582 // restore JVM state to the state at the arraycopy
5583 saved_jvms_before_guards->map()->set_control(map()->control());
5584 assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
5585 assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
5586 // If we've improved the types of some nodes (null check) while
5587 // emitting the guards, propagate them to the current state
5588 map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
5589 set_jvms(saved_jvms_before_guards);
5590 _reexecute_sp = saved_reexecute_sp;
5591
5592 // Remove the allocation from above the guards
5593 CallProjections* callprojs = alloc->extract_projections(true);
5594 InitializeNode* init = alloc->initialization();
5595 Node* alloc_mem = alloc->in(TypeFunc::Memory);
5596 C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
5597 C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
5598
5599 // The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
5600 // the allocation (i.e. is only valid if the allocation succeeds):
5601 // 1) replace CastIINode with AllocateArrayNode's length here
5602 // 2) Create CastIINode again once allocation has moved (see below) at the end of this method
5603 //
5604 // Multiple identical CastIINodes might exist here. Each GraphKit::load_array_length() call will generate
5605 // a new, separate CastIINode (arraycopy guard checks or any array length use between array allocation and arraycopy).
5606 Node* init_control = init->proj_out(TypeFunc::Control);
5607 Node* alloc_length = alloc->Ideal_length();
5608 #ifdef ASSERT
5609 Node* prev_cast = nullptr;
5610 #endif
5611 for (uint i = 0; i < init_control->outcnt(); i++) {
5612 Node* init_out = init_control->raw_out(i);
5613 if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) {
5614 #ifdef ASSERT
5615 if (prev_cast == nullptr) {
5616 prev_cast = init_out;
5618 if (prev_cast->cmp(*init_out) == false) {
5619 prev_cast->dump();
5620 init_out->dump();
5621 assert(false, "not equal CastIINode");
5622 }
5623 }
5624 #endif
5625 C->gvn_replace_by(init_out, alloc_length);
5626 }
5627 }
5628 C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
5629
5630 // move the allocation here (after the guards)
5631 _gvn.hash_delete(alloc);
5632 alloc->set_req(TypeFunc::Control, control());
5633 alloc->set_req(TypeFunc::I_O, i_o());
5634 Node *mem = reset_memory();
5635 set_all_memory(mem);
5636 alloc->set_req(TypeFunc::Memory, mem);
5637 set_control(init->proj_out_or_null(TypeFunc::Control));
5638 set_i_o(callprojs->fallthrough_ioproj);
5639
5640 // Update memory as done in GraphKit::set_output_for_allocation()
5641 const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
5642 const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
5643 if (ary_type->isa_aryptr() && length_type != nullptr) {
5644 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
5645 }
5646 const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
5647 int elemidx = C->get_alias_index(telemref);
5648 set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
5649 set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
5650
5651 Node* allocx = _gvn.transform(alloc);
5652 assert(allocx == alloc, "where has the allocation gone?");
5653 assert(dest->is_CheckCastPP(), "not an allocation result?");
5654
5655 _gvn.hash_delete(dest);
5656 dest->set_req(0, control());
5657 Node* destx = _gvn.transform(dest);
5658 assert(destx == dest, "where has the allocation result gone?");
5920 top_src = src_type->isa_aryptr();
5921 has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM);
5922 src_spec = true;
5923 }
5924 if (!has_dest) {
5925 dest = maybe_cast_profiled_obj(dest, dest_k, true);
5926 dest_type = _gvn.type(dest);
5927 top_dest = dest_type->isa_aryptr();
5928 has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM);
5929 dest_spec = true;
5930 }
5931 }
5932 }
5933
5934 if (has_src && has_dest && can_emit_guards) {
5935 BasicType src_elem = top_src->isa_aryptr()->elem()->array_element_basic_type();
5936 BasicType dest_elem = top_dest->isa_aryptr()->elem()->array_element_basic_type();
5937 if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
5938 if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
5939
5940 if (src_elem == dest_elem && top_src->is_flat() == top_dest->is_flat() && src_elem == T_OBJECT) {
5941 // If both arrays are object arrays then having the exact types
5942 // for both will remove the need for a subtype check at runtime
5943 // before the call and may make it possible to pick a faster copy
5944 // routine (without a subtype check on every element)
5945 // Do we have the exact type of src?
5946 bool could_have_src = src_spec;
5947 // Do we have the exact type of dest?
5948 bool could_have_dest = dest_spec;
5949 ciKlass* src_k = nullptr;
5950 ciKlass* dest_k = nullptr;
5951 if (!src_spec) {
5952 src_k = src_type->speculative_type_not_null();
5953 if (src_k != nullptr && src_k->is_array_klass()) {
5954 could_have_src = true;
5955 }
5956 }
5957 if (!dest_spec) {
5958 dest_k = dest_type->speculative_type_not_null();
5959 if (dest_k != nullptr && dest_k->is_array_klass()) {
5960 could_have_dest = true;
5961 }
5962 }
5963 if (could_have_src && could_have_dest) {
5964 // If we can have both exact types, emit the missing guards
5965 if (could_have_src && !src_spec) {
5966 src = maybe_cast_profiled_obj(src, src_k, true);
5967 src_type = _gvn.type(src);
5968 top_src = src_type->isa_aryptr();
5969 }
5970 if (could_have_dest && !dest_spec) {
5971 dest = maybe_cast_profiled_obj(dest, dest_k, true);
5972 dest_type = _gvn.type(dest);
5973 top_dest = dest_type->isa_aryptr();
5974 }
5975 }
5976 }
5977 }
5978
5979 ciMethod* trap_method = method();
5980 int trap_bci = bci();
5981 if (saved_jvms_before_guards != nullptr) {
5982 trap_method = alloc->jvms()->method();
5983 trap_bci = alloc->jvms()->bci();
5984 }
5985
5986 bool negative_length_guard_generated = false;
5987
5988 if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5989 can_emit_guards && !src->is_top() && !dest->is_top()) {
5990 // validate arguments: enables transformation of the ArrayCopyNode
5991 validated = true;
5992
5993 RegionNode* slow_region = new RegionNode(1);
5994 record_for_igvn(slow_region);
5995
5996 // (1) src and dest are arrays.
5997 generate_non_array_guard(load_object_klass(src), slow_region);
5998 generate_non_array_guard(load_object_klass(dest), slow_region);
5999
6000 // (2) src and dest arrays must have elements of the same BasicType
6001 // done at macro expansion or at Ideal transformation time
6002
6003 // (4) src_offset must not be negative.
6004 generate_negative_guard(src_offset, slow_region);
6005
6006 // (5) dest_offset must not be negative.
6007 generate_negative_guard(dest_offset, slow_region);
6008
6009 // (7) src_offset + length must not exceed length of src.
6012 slow_region);
6013
6014 // (8) dest_offset + length must not exceed length of dest.
6015 generate_limit_guard(dest_offset, length,
6016 load_array_length(dest),
6017 slow_region);
6018
6019 // (6) length must not be negative.
6020 // This is also checked in generate_arraycopy() during macro expansion, but
6021 // we also have to check it here for the case where the ArrayCopyNode will
6022 // be eliminated by Escape Analysis.
6023 if (EliminateAllocations) {
6024 generate_negative_guard(length, slow_region);
6025 negative_length_guard_generated = true;
6026 }
6027
6028 // (9) each element of an oop array must be assignable
6029 Node* dest_klass = load_object_klass(dest);
6030 if (src != dest) {
6031 Node* not_subtype_ctrl = gen_subtype_check(src, dest_klass);
6032 slow_region->add_req(not_subtype_ctrl);
6033 }
6034
6035 const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
6036 const Type* toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
6037 src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
6038 src_type = _gvn.type(src);
6039 top_src = src_type->isa_aryptr();
6040
6041 // Handle flat inline type arrays (null-free arrays are handled by the subtype check above)
6042 if (!stopped() && UseFlatArray) {
6043 // If dest is flat, src must be flat as well (guaranteed by src <: dest check). Handle flat src here.
6044 assert(top_dest == nullptr || !top_dest->is_flat() || top_src->is_flat(), "src array must be flat");
6045 if (top_src != nullptr && top_src->is_flat()) {
6046 // Src is flat, check that dest is flat as well
6047 if (top_dest != nullptr && !top_dest->is_flat()) {
6048 generate_fair_guard(flat_array_test(dest_klass, /* flat = */ false), slow_region);
6049 // Since dest is flat and src <: dest, dest must have the same type as src.
6050 top_dest = top_src->cast_to_exactness(false);
6051 assert(top_dest->is_flat(), "dest must be flat");
6052 dest = _gvn.transform(new CheckCastPPNode(control(), dest, top_dest));
6053 }
6054 } else if (top_src == nullptr || !top_src->is_not_flat()) {
6055 // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
6056 // TODO 8251971: Optimize for the case when src/dest are later found to be both flat.
6057 assert(top_dest == nullptr || !top_dest->is_flat(), "dest array must not be flat");
6058 generate_fair_guard(flat_array_test(src), slow_region);
6059 if (top_src != nullptr) {
6060 top_src = top_src->cast_to_not_flat();
6061 src = _gvn.transform(new CheckCastPPNode(control(), src, top_src));
6062 }
6063 }
6064 }
6065
6066 {
6067 PreserveJVMState pjvms(this);
6068 set_control(_gvn.transform(slow_region));
6069 uncommon_trap(Deoptimization::Reason_intrinsic,
6070 Deoptimization::Action_make_not_entrant);
6071 assert(stopped(), "Should be stopped");
6072 }
6073 arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
6074 }
6075
6076 if (stopped()) {
6077 return true;
6078 }
6079
6080 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated,
6081 // Create LoadRange and LoadKlass nodes for use during macro expansion here
6082 // so the compiler has a chance to eliminate them: during macro expansion,
6083 // we have to set their control (CastPP nodes are eliminated).
6084 load_object_klass(src), load_object_klass(dest),
6085 load_array_length(src), load_array_length(dest));
6086
6087 ac->set_arraycopy(validated);
6088
6089 Node* n = _gvn.transform(ac);
6090 if (n == ac) {
6091 ac->connect_outputs(this);
6092 } else {