967 break;
968 }
969
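          // With compressed oops (unless 'wide' forces a full load),
          // references are read as 32-bit narrow oops here and decoded
          // after the switch.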
970 case T_ARRAY: // fall through
971 case T_OBJECT: // fall through
972 if (UseCompressedOops && !wide) {
973 __ ldrw(dest->as_register(), as_Address(from_addr));
974 } else {
975 __ ldr(dest->as_register(), as_Address(from_addr));
976 }
977 break;
978 case T_METADATA:
 979       // T_METADATA is only used to store a method pointer to the stack
 980       // to pass to a dtrace runtime call, so no load should reach here.
 981       // It can't work on 64 bit with compressed klass ptrs anyway:
 982       // T_METADATA can be a compressed klass ptr or a 64 bit method pointer.
983 ShouldNotReachHere();
984 __ ldr(dest->as_register(), as_Address(from_addr));
985 break;
986 case T_ADDRESS:
987 // FIXME: OMG this is a horrible kludge. Any offset from an
988 // address that matches klass_offset_in_bytes() will be loaded
989 // as a word, not a long.
990 if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
991 __ ldrw(dest->as_register(), as_Address(from_addr));
992 } else {
993 __ ldr(dest->as_register(), as_Address(from_addr));
994 }
995 break;
996 case T_INT:
997 __ ldrw(dest->as_register(), as_Address(from_addr));
998 break;
999
1000 case T_LONG: {
1001 __ ldr(dest->as_register_lo(), as_Address_lo(from_addr));
1002 break;
1003 }
1004
1005 case T_BYTE:
1006 __ ldrsb(dest->as_register(), as_Address(from_addr));
1007 break;
1008 case T_BOOLEAN: {
1009 __ ldrb(dest->as_register(), as_Address(from_addr));
1010 break;
1011 }
1012
1013 case T_CHAR:
1014 __ ldrh(dest->as_register(), as_Address(from_addr));
1015 break;
1016 case T_SHORT:
1017 __ ldrsh(dest->as_register(), as_Address(from_addr));
1018 break;
1019
1020 default:
1021 ShouldNotReachHere();
1022 }
1023
1024 if (is_reference_type(type)) {
1025 if (UseCompressedOops && !wide) {
1026 __ decode_heap_oop(dest->as_register());
1027 }
1028
1029 if (!UseZGC) {
1030 // Load barrier has not yet been applied, so ZGC can't verify the oop here
1031 __ verify_oop(dest->as_register());
1032 }
1033 } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1034 if (UseCompressedClassPointers) {
1035 __ decode_klass_not_null(dest->as_register());
1036 }
1037 }
1038 }
1039
1040
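// Element size expressed as a log2 shift amount, ready to be used as the
// scale in scaled-index addressing and allocation size computations.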
1041 int LIR_Assembler::array_element_size(BasicType type) const {
1042 int elem_size = type2aelembytes(type);
1043 return exact_log2(elem_size);
1044 }
1045
1046
1047 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1048 switch (op->code()) {
1049 case lir_idiv:
1050 case lir_irem:
1051 arithmetic_idiv(op->code(),
1052 op->in_opr1(),
1053 op->in_opr2(),
1054 op->in_opr3(),
1055 op->result_opr(),
1056 op->info());
1232 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1233 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1234 __ b(*op->stub()->entry());
1235 } else {
1236 Register tmp1 = op->tmp1()->as_register();
1237 Register tmp2 = op->tmp2()->as_register();
1238 Register tmp3 = op->tmp3()->as_register();
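              // If 'len' aliases tmp1 or tmp2, substitute tmp3 for that
              // temp; otherwise keep a copy of the length in tmp3 as well.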
1239 if (len == tmp1) {
1240 tmp1 = tmp3;
1241 } else if (len == tmp2) {
1242 tmp2 = tmp3;
1243 } else if (len == tmp3) {
1244 // everything is ok
1245 } else {
1246 __ mov(tmp3, len);
1247 }
1248 __ allocate_array(op->obj()->as_register(),
1249 len,
1250 tmp1,
1251 tmp2,
1252 arrayOopDesc::header_size(op->type()),
1253 array_element_size(op->type()),
1254 op->klass()->as_register(),
1255 *op->stub()->entry());
1256 }
1257 __ bind(*op->stub()->continuation());
1258 }
1259
1260 void LIR_Assembler::type_profile_helper(Register mdo,
1261 ciMethodData *md, ciProfileData *data,
1262 Register recv, Label* update_done) {
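  // Walk the receiver rows of the ReceiverTypeData: on a match, bump that
  // row's counter and branch straight to 'update_done'.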
1263 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1264 Label next_test;
1266     // See if the receiver is receiver[i].
1266 __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
1267 __ ldr(rscratch1, Address(rscratch2));
1268 __ cmp(recv, rscratch1);
1269 __ br(Assembler::NE, next_test);
1270 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
1271 __ addptr(data_addr, DataLayout::counter_increment);
1272 __ b(*update_done);
2349 }
2350
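  // Unsigned compares: branch to the slow-path stub when pos + length
  // exceeds the corresponding array's length field.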
2351 if (flags & LIR_OpArrayCopy::src_range_check) {
2352 __ addw(tmp, src_pos, length);
2353 __ ldrw(rscratch1, src_length_addr);
2354 __ cmpw(tmp, rscratch1);
2355 __ br(Assembler::HI, *stub->entry());
2356 }
2357 if (flags & LIR_OpArrayCopy::dst_range_check) {
2358 __ addw(tmp, dst_pos, length);
2359 __ ldrw(rscratch1, dst_length_addr);
2360 __ cmpw(tmp, rscratch1);
2361 __ br(Assembler::HI, *stub->entry());
2362 }
2363
2364 if (flags & LIR_OpArrayCopy::type_check) {
2365     // We don't know that the array types are compatible
2366 if (basic_type != T_OBJECT) {
2367 // Simple test for basic type arrays
2368 if (UseCompressedClassPointers) {
2369 __ ldrw(tmp, src_klass_addr);
2370 __ ldrw(rscratch1, dst_klass_addr);
2371 __ cmpw(tmp, rscratch1);
2372 } else {
2373 __ ldr(tmp, src_klass_addr);
2374 __ ldr(rscratch1, dst_klass_addr);
2375 __ cmp(tmp, rscratch1);
2376 }
2377 __ br(Assembler::NE, *stub->entry());
2378 } else {
2379       // For object arrays, if src is a subclass of dst then we can
2380 // safely do the copy.
2381 Label cont, slow;
2382
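      // 'src' and 'dst' are clobbered in place by the load_klass calls
      // below, so spill the pair around the subtype check.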
2383 #define PUSH(r1, r2) \
2384 stp(r1, r2, __ pre(sp, -2 * wordSize));
2385
2386 #define POP(r1, r2) \
2387 ldp(r1, r2, __ post(sp, 2 * wordSize));
2388
2389 __ PUSH(src, dst);
2390
2391 __ load_klass(src, src);
2392 __ load_klass(dst, dst);
2393
2394 __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
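      // The fast path branches to 'cont' on a definite subtype hit and to
      // 'slow' on a definite miss; inconclusive cases fall through to the
      // full subtype check (not shown in this excerpt).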
2478 }
2479
2480 __ b(*stub->entry());
2481
2482 __ bind(cont);
2483 __ POP(src, dst);
2484 }
2485 }
2486
2487 #ifdef ASSERT
2488 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2489 // Sanity check the known type with the incoming class. For the
2490 // primitive case the types must match exactly with src.klass and
2491 // dst.klass each exactly matching the default type. For the
2492 // object array case, if no type check is needed then either the
2493 // dst type is exactly the expected type and the src type is a
2494 // subtype which we can't check or src is the same array as dst
2495 // but not necessarily exactly of type default_type.
2496 Label known_ok, halt;
2497 __ mov_metadata(tmp, default_type->constant_encoding());
2498 if (UseCompressedClassPointers) {
2499 __ encode_klass_not_null(tmp);
2500 }
2501
2502 if (basic_type != T_OBJECT) {
2503
2504 if (UseCompressedClassPointers) {
2505 __ ldrw(rscratch1, dst_klass_addr);
2506 __ cmpw(tmp, rscratch1);
2507 } else {
2508 __ ldr(rscratch1, dst_klass_addr);
2509 __ cmp(tmp, rscratch1);
2510 }
2511 __ br(Assembler::NE, halt);
2512 if (UseCompressedClassPointers) {
2513 __ ldrw(rscratch1, src_klass_addr);
2514 __ cmpw(tmp, rscratch1);
2515 } else {
2516 __ ldr(rscratch1, src_klass_addr);
2517 __ cmp(tmp, rscratch1);
2518 }
2519 __ br(Assembler::EQ, known_ok);
2520 } else {
2521 if (UseCompressedClassPointers) {
2522 __ ldrw(rscratch1, dst_klass_addr);
2523 __ cmpw(tmp, rscratch1);
2524 } else {
2525 __ ldr(rscratch1, dst_klass_addr);
2526 __ cmp(tmp, rscratch1);
2527 }
2528 __ br(Assembler::EQ, known_ok);
2529 __ cmp(src, dst);
2530 __ br(Assembler::EQ, known_ok);
2531 }
2532 __ bind(halt);
2533 __ stop("incorrect type information in arraycopy");
2534 __ bind(known_ok);
2535 }
2536 #endif
2537
2538 #ifndef PRODUCT
2539 if (PrintC1Statistics) {
2540 __ incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
2541 }
2542 #endif
2543
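  // First two C arguments: raw addresses of the first source and first
  // destination element (base + pos << scale + array base offset).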
2544 __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2545 __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2546 assert_different_registers(c_rarg0, dst, dst_pos, length);
2547 __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2555 const char *name;
2556 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2557
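  // If 'entry' resolves to a blob in the code cache it can be reached with
  // a direct far call; otherwise go through a leaf VM call with 3 args.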
2558 CodeBlob *cb = CodeCache::find_blob(entry);
2559 if (cb) {
2560 __ far_call(RuntimeAddress(entry));
2561 } else {
2562 __ call_VM_leaf(entry, 3);
2563 }
2564
2565 __ bind(*stub->continuation());
2566 }
2567
2568
2569
2570
2571 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2572 Register obj = op->obj_opr()->as_register(); // may not be an oop
2573 Register hdr = op->hdr_opr()->as_register();
2574 Register lock = op->lock_opr()->as_register();
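  // With fast locking, lock_object emits the inline stack-lock attempt and
  // returns the offset of the first instruction that can fault, which is
  // reused below as the implicit null check site; everything else funnels
  // into the slow-path stub.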
2575 if (!UseFastLocking) {
2576 __ b(*op->stub()->entry());
2577 } else if (op->code() == lir_lock) {
2578 Register scratch = noreg;
2579 if (UseBiasedLocking) {
2580 scratch = op->scratch_opr()->as_register();
2581 }
2582 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2583 // add debug info for NullPointerException only if one is possible
2584 int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
2585 if (op->info() != NULL) {
2586 add_debug_info_for_null_check(null_check_offset, op->info());
2587 }
2588 // done
2589 } else if (op->code() == lir_unlock) {
2590 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2591 __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2592 } else {
2593 Unimplemented();
2594 }
2595 __ bind(*op->stub()->continuation());
2596 }
2597
2598
2599 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2600 ciMethod* method = op->profiled_method();
2601 int bci = op->profiled_bci();
2602 ciMethod* callee = op->profiled_callee();
2603
2604 // Update counter for all call types
2605 ciMethodData* md = method->method_data_or_null();
2606 assert(md != NULL, "Sanity");
2607 ciProfileData* data = md->bci_to_data(bci);
2608 assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
2609 assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
2610 Register mdo = op->mdo()->as_register();
2611 __ mov_metadata(mdo, md->constant_encoding());
2612 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
2613 // Perform additional virtual call profiling for invokevirtual and
2614 // invokeinterface bytecodes
2615 if (op->should_profile_receiver_type()) {
2616 assert(op->recv()->is_single_cpu(), "recv must be allocated");
2617 Register recv = op->recv()->as_register();
// ======== Second pane of the side-by-side view: the updated version of the same code follows. ========
967 break;
968 }
969
970 case T_ARRAY: // fall through
971 case T_OBJECT: // fall through
972 if (UseCompressedOops && !wide) {
973 __ ldrw(dest->as_register(), as_Address(from_addr));
974 } else {
975 __ ldr(dest->as_register(), as_Address(from_addr));
976 }
977 break;
978 case T_METADATA:
 979       // T_METADATA is only used to store a method pointer to the stack
 980       // to pass to a dtrace runtime call, so no load should reach here.
 981       // It can't work on 64 bit with compressed klass ptrs anyway:
 982       // T_METADATA can be a compressed klass ptr or a 64 bit method pointer.
983 ShouldNotReachHere();
984 __ ldr(dest->as_register(), as_Address(from_addr));
985 break;
986 case T_ADDRESS:
987 __ ldr(dest->as_register(), as_Address(from_addr));
988 break;
989 case T_INT:
990 __ ldrw(dest->as_register(), as_Address(from_addr));
991 break;
992
993 case T_LONG: {
994 __ ldr(dest->as_register_lo(), as_Address_lo(from_addr));
995 break;
996 }
997
998 case T_BYTE:
999 __ ldrsb(dest->as_register(), as_Address(from_addr));
1000 break;
1001 case T_BOOLEAN: {
1002 __ ldrb(dest->as_register(), as_Address(from_addr));
1003 break;
1004 }
1005
1006 case T_CHAR:
1007 __ ldrh(dest->as_register(), as_Address(from_addr));
1008 break;
1009 case T_SHORT:
1010 __ ldrsh(dest->as_register(), as_Address(from_addr));
1011 break;
1012
1013 default:
1014 ShouldNotReachHere();
1015 }
1016
1017 if (is_reference_type(type)) {
1018 if (UseCompressedOops && !wide) {
1019 __ decode_heap_oop(dest->as_register());
1020 }
1021
1022 if (!UseZGC) {
1023 // Load barrier has not yet been applied, so ZGC can't verify the oop here
1024 __ verify_oop(dest->as_register());
1025 }
1026 }
1027 }
1028
1029
1030 int LIR_Assembler::array_element_size(BasicType type) const {
1031 int elem_size = type2aelembytes(type);
1032 return exact_log2(elem_size);
1033 }
1034
1035
1036 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1037 switch (op->code()) {
1038 case lir_idiv:
1039 case lir_irem:
1040 arithmetic_idiv(op->code(),
1041 op->in_opr1(),
1042 op->in_opr2(),
1043 op->in_opr3(),
1044 op->result_opr(),
1045 op->info());
1221 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1222 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1223 __ b(*op->stub()->entry());
1224 } else {
1225 Register tmp1 = op->tmp1()->as_register();
1226 Register tmp2 = op->tmp2()->as_register();
1227 Register tmp3 = op->tmp3()->as_register();
1228 if (len == tmp1) {
1229 tmp1 = tmp3;
1230 } else if (len == tmp2) {
1231 tmp2 = tmp3;
1232 } else if (len == tmp3) {
1233 // everything is ok
1234 } else {
1235 __ mov(tmp3, len);
1236 }
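              // base_offset_in_bytes(type) is the offset of element 0,
              // i.e. the header size in bytes for this element type.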
1237 __ allocate_array(op->obj()->as_register(),
1238 len,
1239 tmp1,
1240 tmp2,
1241 arrayOopDesc::base_offset_in_bytes(op->type()),
1242 array_element_size(op->type()),
1243 op->klass()->as_register(),
1244 *op->stub()->entry());
1245 }
1246 __ bind(*op->stub()->continuation());
1247 }
1248
1249 void LIR_Assembler::type_profile_helper(Register mdo,
1250 ciMethodData *md, ciProfileData *data,
1251 Register recv, Label* update_done) {
1252 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1253 Label next_test;
1255     // See if the receiver is receiver[i].
1255 __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
1256 __ ldr(rscratch1, Address(rscratch2));
1257 __ cmp(recv, rscratch1);
1258 __ br(Assembler::NE, next_test);
1259 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
1260 __ addptr(data_addr, DataLayout::counter_increment);
1261 __ b(*update_done);
2338 }
2339
2340 if (flags & LIR_OpArrayCopy::src_range_check) {
2341 __ addw(tmp, src_pos, length);
2342 __ ldrw(rscratch1, src_length_addr);
2343 __ cmpw(tmp, rscratch1);
2344 __ br(Assembler::HI, *stub->entry());
2345 }
2346 if (flags & LIR_OpArrayCopy::dst_range_check) {
2347 __ addw(tmp, dst_pos, length);
2348 __ ldrw(rscratch1, dst_length_addr);
2349 __ cmpw(tmp, rscratch1);
2350 __ br(Assembler::HI, *stub->entry());
2351 }
2352
2353 if (flags & LIR_OpArrayCopy::type_check) {
2354     // We don't know that the array types are compatible
2355 if (basic_type != T_OBJECT) {
2356 // Simple test for basic type arrays
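          // load_nklass yields the narrow Klass* (taken from the mark word
          // when compact object headers are enabled, as in emit_load_klass
          // below), so 32-bit compares suffice.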
2357 if (UseCompressedClassPointers) {
2358 __ load_nklass(tmp, src);
2359 __ load_nklass(rscratch1, dst);
2360 __ cmpw(tmp, rscratch1);
2361 } else {
2362 __ ldr(tmp, Address(src, oopDesc::klass_offset_in_bytes()));
2363 __ ldr(rscratch1, Address(dst, oopDesc::klass_offset_in_bytes()));
2364 __ cmp(tmp, rscratch1);
2365 }
2366 __ br(Assembler::NE, *stub->entry());
2367 } else {
2368       // For object arrays, if src is a subclass of dst then we can
2369 // safely do the copy.
2370 Label cont, slow;
2371
2372 #define PUSH(r1, r2) \
2373 stp(r1, r2, __ pre(sp, -2 * wordSize));
2374
2375 #define POP(r1, r2) \
2376 ldp(r1, r2, __ post(sp, 2 * wordSize));
2377
2378 __ PUSH(src, dst);
2379
2380 __ load_klass(src, src);
2381 __ load_klass(dst, dst);
2382
2383 __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
2467 }
2468
2469 __ b(*stub->entry());
2470
2471 __ bind(cont);
2472 __ POP(src, dst);
2473 }
2474 }
2475
2476 #ifdef ASSERT
2477 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2478 // Sanity check the known type with the incoming class. For the
2479 // primitive case the types must match exactly with src.klass and
2480 // dst.klass each exactly matching the default type. For the
2481 // object array case, if no type check is needed then either the
2482 // dst type is exactly the expected type and the src type is a
2483 // subtype which we can't check or src is the same array as dst
2484 // but not necessarily exactly of type default_type.
2485 Label known_ok, halt;
2486 __ mov_metadata(tmp, default_type->constant_encoding());
2487
2488 if (basic_type != T_OBJECT) {
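        // cmp_klass loads the object's klass word and compares it against
        // the Klass* in tmp, handling the narrow-vs-full klass encoding
        // internally (rscratch1 is scratch).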
2489 __ cmp_klass(dst, tmp, rscratch1);
2490 __ br(Assembler::NE, halt);
2491 __ cmp_klass(src, tmp, rscratch1);
2492 __ br(Assembler::EQ, known_ok);
2493 } else {
2494 __ cmp_klass(dst, tmp, rscratch1);
2495 __ br(Assembler::EQ, known_ok);
2496 __ cmp(src, dst);
2497 __ br(Assembler::EQ, known_ok);
2498 }
2499 __ bind(halt);
2500 __ stop("incorrect type information in arraycopy");
2501 __ bind(known_ok);
2502 }
2503 #endif
2504
2505 #ifndef PRODUCT
2506 if (PrintC1Statistics) {
2507 __ incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
2508 }
2509 #endif
2510
2511 __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2512 __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2513 assert_different_registers(c_rarg0, dst, dst_pos, length);
2514 __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2522 const char *name;
2523 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2524
2525 CodeBlob *cb = CodeCache::find_blob(entry);
2526 if (cb) {
2527 __ far_call(RuntimeAddress(entry));
2528 } else {
2529 __ call_VM_leaf(entry, 3);
2530 }
2531
2532 __ bind(*stub->continuation());
2533 }
2534
2535
2536
2537
2538 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2539 Register obj = op->obj_opr()->as_register(); // may not be an oop
2540 Register hdr = op->hdr_opr()->as_register();
2541 Register lock = op->lock_opr()->as_register();
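  // LM_MONITOR: no inline fast path, always call into the runtime; the
  // null check must be explicit here since no fast-path access will
  // trigger an implicit one.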
2542 if (LockingMode == LM_MONITOR) {
2543 if (op->info() != NULL) {
2544 add_debug_info_for_null_check_here(op->info());
2545 __ null_check(obj, -1);
2546 }
2547 __ b(*op->stub()->entry());
2548 } else if (op->code() == lir_lock) {
2549 Register scratch = noreg;
2550 if (UseBiasedLocking) {
2551 scratch = op->scratch_opr()->as_register();
2552 }
2553 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2554 // add debug info for NullPointerException only if one is possible
2555 int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
2556 if (op->info() != NULL) {
2557 add_debug_info_for_null_check(null_check_offset, op->info());
2558 }
2559 // done
2560 } else if (op->code() == lir_unlock) {
2561 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2562 __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2563 } else {
2564 Unimplemented();
2565 }
2566 __ bind(*op->stub()->continuation());
2567 }
2568
2569 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
2570 Register obj = op->obj()->as_pointer_register();
2571 Register result = op->result_opr()->as_pointer_register();
2572
2573 CodeEmitInfo* info = op->info();
2574 if (info != NULL) {
2575 add_debug_info_for_null_check_here(info);
2576 }
2577
2578 if (UseCompressedClassPointers) {
2579 if (UseCompactObjectHeaders) {
2580 // Check if we can take the (common) fast path, if obj is unlocked.
2581 __ ldr(result, Address(obj, oopDesc::mark_offset_in_bytes()));
2582 __ tst(result, markWord::monitor_value);
2583 __ br(Assembler::NE, *op->stub()->entry());
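          // Monitor bit set: the mark word is displaced into the inflated
          // monitor, so the stub must recover the real header before the
          // klass bits can be extracted at 'continuation' below.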
2584 __ bind(*op->stub()->continuation());
2585
2586 // Shift to get proper narrow Klass*.
2587 __ lsr(result, result, markWord::klass_shift);
2588 } else {
2589       __ ldrw(result, Address(obj, oopDesc::klass_offset_in_bytes()));
2590 }
2591 __ decode_klass_not_null(result);
2592 } else {
2593     __ ldr(result, Address(obj, oopDesc::klass_offset_in_bytes()));
2594 }
2595 }
2596
2597 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2598 ciMethod* method = op->profiled_method();
2599 int bci = op->profiled_bci();
2600 ciMethod* callee = op->profiled_callee();
2601
2602 // Update counter for all call types
2603 ciMethodData* md = method->method_data_or_null();
2604 assert(md != NULL, "Sanity");
2605 ciProfileData* data = md->bci_to_data(bci);
2606 assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
2607 assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
2608 Register mdo = op->mdo()->as_register();
2609 __ mov_metadata(mdo, md->constant_encoding());
2610 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
2611 // Perform additional virtual call profiling for invokevirtual and
2612 // invokeinterface bytecodes
2613 if (op->should_profile_receiver_type()) {
2614 assert(op->recv()->is_single_cpu(), "recv must be allocated");
2615 Register recv = op->recv()->as_register();