
src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp


  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "compiler/compiler_globals.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/barrierSetAssembler.hpp"
  30 #include "interp_masm_aarch64.hpp"
  31 #include "interpreter/interpreter.hpp"
  32 #include "interpreter/interpreterRuntime.hpp"
  33 #include "logging/log.hpp"
  34 #include "oops/arrayOop.hpp"

  35 #include "oops/markWord.hpp"
  36 #include "oops/method.hpp"
  37 #include "oops/methodData.hpp"

  38 #include "oops/resolvedFieldEntry.hpp"
  39 #include "oops/resolvedIndyEntry.hpp"
  40 #include "oops/resolvedMethodEntry.hpp"
  41 #include "prims/jvmtiExport.hpp"
  42 #include "prims/jvmtiThreadState.hpp"
  43 #include "runtime/basicLock.hpp"
  44 #include "runtime/frame.inline.hpp"
  45 #include "runtime/javaThread.hpp"
  46 #include "runtime/safepointMechanism.hpp"
  47 #include "runtime/sharedRuntime.hpp"
  48 #include "utilities/powerOfTwo.hpp"
  49 
  50 void InterpreterMacroAssembler::narrow(Register result) {
  51 
  52   // Get method->_constMethod->_result_type
  53   ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  54   ldr(rscratch1, Address(rscratch1, Method::const_offset()));
  55   ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
  56 
  57   Label done, notBool, notByte, notChar;

 191     ldrw(index, Address(rbcp, bcp_offset));
 192   } else if (index_size == sizeof(u1)) {
 193     load_unsigned_byte(index, Address(rbcp, bcp_offset));
 194   } else {
 195     ShouldNotReachHere();
 196   }
 197 }
 198 
 199 void InterpreterMacroAssembler::get_method_counters(Register method,
 200                                                     Register mcs, Label& skip) {
 201   Label has_counters;
 202   ldr(mcs, Address(method, Method::method_counters_offset()));
 203   cbnz(mcs, has_counters);
 204   call_VM(noreg, CAST_FROM_FN_PTR(address,
 205           InterpreterRuntime::build_method_counters), method);
 206   ldr(mcs, Address(method, Method::method_counters_offset()));
 207   cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
 208   bind(has_counters);
 209 }
 210 

 211 // Load object from cpool->resolved_references(index)
 212 void InterpreterMacroAssembler::load_resolved_reference_at_index(
 213                                            Register result, Register index, Register tmp) {
 214   assert_different_registers(result, index);
 215 
 216   get_constant_pool(result);
 217   // load pointer for resolved_references[] objArray
 218   ldr(result, Address(result, ConstantPool::cache_offset()));
 219   ldr(result, Address(result, ConstantPoolCache::resolved_references_offset()));
 220   resolve_oop_handle(result, tmp, rscratch2);
 221   // Add in the index
 222   add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
 223   load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)), tmp, rscratch2);
 224 }
 225 
 226 void InterpreterMacroAssembler::load_resolved_klass_at_offset(
 227                              Register cpool, Register index, Register klass, Register temp) {
 228   add(temp, cpool, index, LSL, LogBytesPerWord);
 229   ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
 230   ldr(klass, Address(cpool,  ConstantPool::resolved_klasses_offset())); // klass = cpool->_resolved_klasses
 231   add(klass, klass, temp, LSL, LogBytesPerWord);
 232   ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
 233 }
 234 
 235 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
 236 // subtype of super_klass.
 237 //
 238 // Args:
 239 //      r0: superklass
 240 //      Rsub_klass: subklass
 241 //
 242 // Kills:
 243 //      r2, r5
 244 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
 245                                                   Label& ok_is_subtype) {

 246   assert(Rsub_klass != r0, "r0 holds superklass");
 247   assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
 248   assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");
 249 
 250   // Profile the not-null value's klass.
 251   profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5
 252 
 253   // Do the check.
 254   check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
 255 }
 256 
 257 // Java Expression Stack
 258 
 259 void InterpreterMacroAssembler::pop_ptr(Register r) {
 260   ldr(r, post(esp, wordSize));
 261 }
 262 
 263 void InterpreterMacroAssembler::pop_i(Register r) {
 264   ldrw(r, post(esp, wordSize));
 265 }
 266 
 267 void InterpreterMacroAssembler::pop_l(Register r) {
 268   ldr(r, post(esp, 2 * Interpreter::stackElementSize));
 269 }
 270 
 271 void InterpreterMacroAssembler::push_ptr(Register r) {

 605 
 606     add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
 607     bind(entry);
 608     cmp(c_rarg1, r19); // check if bottom reached
 609     br(Assembler::NE, loop); // if not at bottom then check this entry
 610   }
 611 
 612   bind(no_unlock);
 613 
 614   // jvmti support
 615   if (notify_jvmdi) {
 616     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
 617   } else {
 618     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
 619   }
 620 
 621   // remove activation
 622   // get sender esp
 623   ldr(rscratch2,
 624       Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));

 625   if (StackReservedPages > 0) {
 626     // testing if reserved zone needs to be re-enabled
 627     Label no_reserved_zone_enabling;
 628 
 629     // check if already enabled - if so no re-enabling needed
 630     assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
 631     ldrw(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
 632     cmpw(rscratch1, (u1)StackOverflow::stack_guard_enabled);
 633     br(Assembler::EQ, no_reserved_zone_enabling);
 634 
 635     // look for an overflow into the stack reserved zone, i.e.
 636     // interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
 637     ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
 638     cmp(rscratch2, rscratch1);
 639     br(Assembler::LS, no_reserved_zone_enabling);
 640 
 641     call_VM_leaf(
 642       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
 643     call_VM(noreg, CAST_FROM_FN_PTR(address,
 644                    InterpreterRuntime::throw_delayed_StackOverflowError));
 645     should_not_reach_here();
 646 
 647     bind(no_reserved_zone_enabling);
 648   }
 649 
 650   // restore sender esp
 651   mov(esp, rscratch2);
 652   // remove frame anchor
 653   leave();
 654   // If we're returning to interpreted code we will shortly be
 655   // adjusting SP to allow some space for ESP.  If we're returning to
 656   // compiled code the saved sender SP was saved in sender_sp, so this
 657   // restores it.
 658   andr(sp, esp, -16);
 659 }
 660 
 661 // Lock object
 662 //
 663 // Args:
 664 //      c_rarg1: BasicObjectLock to be used for locking
 665 //
 666 // Kills:
 667 //      r0
 668 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, .. (param regs)
 669 //      rscratch1, rscratch2 (scratch regs)

 690 
 691     Label slow_case;
 692 
 693     // Load object pointer into obj_reg %c_rarg3
 694     ldr(obj_reg, Address(lock_reg, obj_offset));
 695 
 696     if (DiagnoseSyncOnValueBasedClasses != 0) {
 697       load_klass(tmp, obj_reg);
 698       ldrb(tmp, Address(tmp, Klass::misc_flags_offset()));
 699       tst(tmp, KlassFlags::_misc_is_value_based_class);
 700       br(Assembler::NE, slow_case);
 701     }
 702 
 703     if (LockingMode == LM_LIGHTWEIGHT) {
 704       lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
 705       b(done);
 706     } else if (LockingMode == LM_LEGACY) {
 707       // Load (object->mark() | 1) into swap_reg
 708       ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 709       orr(swap_reg, rscratch1, 1);
 710 
 711       // Save (object->mark() | 1) into BasicLock's displaced header
 712       str(swap_reg, Address(lock_reg, mark_offset));
 713 
 714       assert(lock_offset == 0,
 715              "displaced header must be first word in BasicObjectLock");
 716 
 717       Label fail;
 718       cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
 719 
 720       // Fast check for recursive lock.
 721       //
 722       // Can apply the optimization only if this is a stack lock
 723       // allocated in this thread. For efficiency, we can focus on
 724       // recently allocated stack locks (instead of reading the stack
 725       // base and checking whether 'mark' points inside the current
 726       // thread stack):
 727       //  1) (mark & 7) == 0, and
 728       //  2) sp <= mark < sp + os::pagesize()
 729       //

1043     Address data(mdp, in_bytes(JumpData::taken_offset()));
1044     ldr(bumped_count, data);
1045     assert(DataLayout::counter_increment == 1,
1046             "flow-free idiom only works with 1");
1047     // Intel does this to catch overflow
1048     // addptr(bumped_count, DataLayout::counter_increment);
1049     // sbbptr(bumped_count, 0);
1050     // so we do this
1051     adds(bumped_count, bumped_count, DataLayout::counter_increment);
1052     Label L;
1053     br(Assembler::CS, L);       // skip store if counter overflow
1054     str(bumped_count, data);
1055     bind(L);
1056     // The method data pointer needs to be updated to reflect the new target.
1057     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1058     bind(profile_continue);
1059   }
1060 }
1061 
1062 
1063 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
1064   if (ProfileInterpreter) {
1065     Label profile_continue;
1066 
1067     // If no method data exists, go to profile_continue.
1068     test_method_data_pointer(mdp, profile_continue);
1069 
1070     // We are not taking the branch.  Increment the not taken count.
1071     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
1072 
1073     // The method data pointer needs to be updated to correspond to
1074     // the next bytecode
1075     update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
1076     bind(profile_continue);
1077   }
1078 }
1079 
1080 
1081 void InterpreterMacroAssembler::profile_call(Register mdp) {
1082   if (ProfileInterpreter) {
1083     Label profile_continue;
1084 
1085     // If no method data exists, go to profile_continue.
1086     test_method_data_pointer(mdp, profile_continue);
1087 
1088     // We are making a call.  Increment the count.
1089     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1090 
1091     // The method data pointer needs to be updated to reflect the new target.
1092     update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1093     bind(profile_continue);
1094   }
1095 }

1378     // case_array_offset_in_bytes()
1379     movw(reg2, in_bytes(MultiBranchData::per_case_size()));
1380     movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
1381     Assembler::maddw(index, index, reg2, rscratch1);
1382 
1383     // Update the case count
1384     increment_mdp_data_at(mdp,
1385                           index,
1386                           in_bytes(MultiBranchData::relative_count_offset()));
1387 
1388     // The method data pointer needs to be updated.
1389     update_mdp_by_offset(mdp,
1390                          index,
1391                          in_bytes(MultiBranchData::
1392                                   relative_displacement_offset()));
1393 
1394     bind(profile_continue);
1395   }
1396 }
1397 
1398 void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
1399   if (state == atos) {
1400     MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
1401   }
1402 }
1403 
1404 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }
1405 
1406 
1407 void InterpreterMacroAssembler::notify_method_entry() {
1408   // Whenever JVMTI is in interp_only_mode, method entry/exit events are sent to
1409   // track stack depth.  If it is possible to enter interp_only_mode we add
1410   // the code to check if the event should be sent.
1411   if (JvmtiExport::can_post_interpreter_events()) {
1412     Label L;
1413     ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1414     cbzw(r3, L);
1415     call_VM(noreg, CAST_FROM_FN_PTR(address,
1416                                     InterpreterRuntime::post_method_entry));
1417     bind(L);

1679         profile_obj_type(tmp, mdo_arg_addr);
1680 
1681         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1682         off_to_args += to_add;
1683       }
1684 
1685       if (MethodData::profile_return()) {
1686         ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
1687         sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
1688       }
1689 
1690       add(rscratch1, mdp, off_to_args);
1691       bind(done);
1692       mov(mdp, rscratch1);
1693 
1694       if (MethodData::profile_return()) {
1695         // We're right after the type profile for the last
1696         // argument. tmp is the number of cells left in the
1697         // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
1698         // if there's a return to profile.
1699         assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
1700         add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
1701       }
1702       str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
1703     } else {
1704       assert(MethodData::profile_return(), "either profile call args or call ret");
1705       update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
1706     }
1707 
1708     // mdp points right after the end of the
1709     // CallTypeData/VirtualCallTypeData, right after the cells for the
1710     // return value type if there's one
1711 
1712     bind(profile_continue);
1713   }
1714 }
1715 
1716 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
1717   assert_different_registers(mdp, ret, tmp, rbcp);
1718   if (ProfileInterpreter && MethodData::profile_return()) {
1719     Label profile_continue, done;

1725 
1726       // If we don't profile all invoke bytecodes we must make sure
1727       // it's a bytecode we indeed profile. We can't go back to the
1728       // beginning of the ProfileData we intend to update to check its
1729       // type because we're right after it and we don't know its
1730       // length
1731       Label do_profile;
1732       ldrb(rscratch1, Address(rbcp, 0));
1733       cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
1734       br(Assembler::EQ, do_profile);
1735       cmp(rscratch1, (u1)Bytecodes::_invokehandle);
1736       br(Assembler::EQ, do_profile);
1737       get_method(tmp);
1738       ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset()));
1739       subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1740       br(Assembler::NE, profile_continue);
1741 
1742       bind(do_profile);
1743     }
1744 
1745     Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
1746     mov(tmp, ret);
1747     profile_obj_type(tmp, mdo_ret_addr);
1748 
1749     bind(profile_continue);
1750   }
1751 }
1752 
1753 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
1754   assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
1755   if (ProfileInterpreter && MethodData::profile_parameters()) {
1756     Label profile_continue, done;
1757 
1758     test_method_data_pointer(mdp, profile_continue);
1759 
1760     // Load the offset of the area within the MDO used for
1761     // parameters. If it's negative we're not profiling any parameters
1762     ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
1763     tbnz(tmp1, 31, profile_continue);  // i.e. sign bit set
1764 
1765     // Compute a pointer to the area for parameters from the offset

  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "compiler/compiler_globals.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/barrierSetAssembler.hpp"
  30 #include "interp_masm_aarch64.hpp"
  31 #include "interpreter/interpreter.hpp"
  32 #include "interpreter/interpreterRuntime.hpp"
  33 #include "logging/log.hpp"
  34 #include "oops/arrayOop.hpp"
  35 #include "oops/constMethodFlags.hpp"
  36 #include "oops/markWord.hpp"
  37 #include "oops/method.hpp"
  38 #include "oops/methodData.hpp"
  39 #include "oops/inlineKlass.hpp"
  40 #include "oops/resolvedFieldEntry.hpp"
  41 #include "oops/resolvedIndyEntry.hpp"
  42 #include "oops/resolvedMethodEntry.hpp"
  43 #include "prims/jvmtiExport.hpp"
  44 #include "prims/jvmtiThreadState.hpp"
  45 #include "runtime/basicLock.hpp"
  46 #include "runtime/frame.inline.hpp"
  47 #include "runtime/javaThread.hpp"
  48 #include "runtime/safepointMechanism.hpp"
  49 #include "runtime/sharedRuntime.hpp"
  50 #include "utilities/powerOfTwo.hpp"
  51 
  52 void InterpreterMacroAssembler::narrow(Register result) {
  53 
  54   // Get method->_constMethod->_result_type
  55   ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  56   ldr(rscratch1, Address(rscratch1, Method::const_offset()));
  57   ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
  58 
  59   Label done, notBool, notByte, notChar;

 193     ldrw(index, Address(rbcp, bcp_offset));
 194   } else if (index_size == sizeof(u1)) {
 195     load_unsigned_byte(index, Address(rbcp, bcp_offset));
 196   } else {
 197     ShouldNotReachHere();
 198   }
 199 }
 200 
 201 void InterpreterMacroAssembler::get_method_counters(Register method,
 202                                                     Register mcs, Label& skip) {
 203   Label has_counters;
 204   ldr(mcs, Address(method, Method::method_counters_offset()));
 205   cbnz(mcs, has_counters);
 206   call_VM(noreg, CAST_FROM_FN_PTR(address,
 207           InterpreterRuntime::build_method_counters), method);
 208   ldr(mcs, Address(method, Method::method_counters_offset()));
 209   cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
 210   bind(has_counters);
 211 }
 212 
 213 void InterpreterMacroAssembler::allocate_instance(Register klass, Register new_obj,
 214                                                   Register t1, Register t2,
 215                                                   bool clear_fields, Label& alloc_failed) {
 216   MacroAssembler::allocate_instance(klass, new_obj, t1, t2, clear_fields, alloc_failed);
 217   if (DTraceMethodProbes) {
 218     // Trigger dtrace event for fastpath
 219     push(atos);
 220     call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), new_obj);
 221     pop(atos);
 222   }
 223 }
 224 
 225 void InterpreterMacroAssembler::read_flat_field(Register entry,
 226                                                 Register field_index, Register field_offset,
 227                                                 Register temp, Register obj) {
 228   Label alloc_failed, empty_value, done;
 229   const Register src = field_offset;
 230   const Register alloc_temp = r10;
 231   const Register dst_temp   = field_index;
 232   const Register layout_info = temp;
 233   assert_different_registers(obj, entry, field_index, field_offset, temp, alloc_temp);
 234 
 235   // Grab the inline field klass
 236   ldr(rscratch1, Address(entry, in_bytes(ResolvedFieldEntry::field_holder_offset())));
 237   inline_layout_info(rscratch1, field_index, layout_info);
 238 
 239   const Register field_klass = dst_temp;
 240   ldr(field_klass, Address(layout_info, in_bytes(InlineLayoutInfo::klass_offset())));
 241 
 242   // check for empty value klass
 243   test_klass_is_empty_inline_type(field_klass, rscratch1, empty_value);
 244 
 245   // allocate buffer
 246   push(obj); // save holder
 247   allocate_instance(field_klass, obj, alloc_temp, rscratch2, false, alloc_failed);
 248 
 249   // Have an oop instance buffer, copy into it
 250   payload_address(obj, dst_temp, field_klass);  // danger, uses rscratch1
 251   pop(alloc_temp);             // restore holder
 252   lea(src, Address(alloc_temp, field_offset));
 253   // call_VM_leaf, clobbers a few regs, save restore new obj
 254   push(obj);
 255   flat_field_copy(IS_DEST_UNINITIALIZED, src, dst_temp, layout_info);
 256   pop(obj);
 257   b(done);
 258 
 259   bind(empty_value);
 260   get_empty_inline_type_oop(field_klass, alloc_temp, obj);
 261   b(done);
 262 
 263   bind(alloc_failed);
 264   pop(obj);
 265   call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flat_field),
 266           obj, entry);
 267 
 268   bind(done);
 269   membar(Assembler::StoreStore);
 270 }
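// In outline, read_flat_field resolves the field's inline klass through the
// holder's InlineLayoutInfo, then either reuses the pre-allocated oop of an
// empty value class or allocates a buffer instance and copies the flat
// payload into it with flat_field_copy; if buffering fails it falls back to
// the InterpreterRuntime::read_flat_field slow path. The trailing StoreStore
// barrier orders the payload copy before the buffered oop is published.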
 271 
 272 // Load object from cpool->resolved_references(index)
 273 void InterpreterMacroAssembler::load_resolved_reference_at_index(
 274                                            Register result, Register index, Register tmp) {
 275   assert_different_registers(result, index);
 276 
 277   get_constant_pool(result);
 278   // load pointer for resolved_references[] objArray
 279   ldr(result, Address(result, ConstantPool::cache_offset()));
 280   ldr(result, Address(result, ConstantPoolCache::resolved_references_offset()));
 281   resolve_oop_handle(result, tmp, rscratch2);
 282   // Add in the index
 283   add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
 284   load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)), tmp, rscratch2);
 285 }
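// A minimal sketch in plain C++ (not the HotSpot code) of the address
// arithmetic above, assuming compressed 4-byte heap oops and a 16-byte
// objArray header: the element base offset is folded into the index register
// before the single scaled access.
//
//   uintptr_t resolved_ref_slot(uintptr_t array, uint32_t i) {
//     const uint32_t log_bytes_per_heap_oop = 2;   // assumed (compressed oops)
//     const uint32_t base_offset_in_bytes   = 16;  // assumed header size
//     i += base_offset_in_bytes >> log_bytes_per_heap_oop;       // add(index, ...)
//     return array + ((uintptr_t)i << log_bytes_per_heap_oop);   // uxtw #2 scaling
//   }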
 286 
 287 void InterpreterMacroAssembler::load_resolved_klass_at_offset(
 288                              Register cpool, Register index, Register klass, Register temp) {
 289   add(temp, cpool, index, LSL, LogBytesPerWord);
 290   ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
 291   ldr(klass, Address(cpool,  ConstantPool::resolved_klasses_offset())); // klass = cpool->_resolved_klasses
 292   add(klass, klass, temp, LSL, LogBytesPerWord);
 293   ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
 294 }
 295 
 296 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
 297 // subtype of super_klass.
 298 //
 299 // Args:
 300 //      r0: superklass
 301 //      Rsub_klass: subklass
 302 //
 303 // Kills:
 304 //      r2, r5
 305 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
 306                                                   Label& ok_is_subtype,
 307                                                   bool profile) {
 308   assert(Rsub_klass != r0, "r0 holds superklass");
 309   assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
 310   assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");
 311 
 312   // Profile the not-null value's klass.
 313   if (profile) {
 314     profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5
 315   }
 316 
 317   // Do the check.
 318   check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
 319 }
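// Callers in the template interpreter (e.g. checkcast, instanceof, aastore)
// load the superklass into r0 before calling this; control transfers to
// ok_is_subtype on success, and falling through means the check failed.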
 320 
 321 // Java Expression Stack
 322 
 323 void InterpreterMacroAssembler::pop_ptr(Register r) {
 324   ldr(r, post(esp, wordSize));
 325 }
 326 
 327 void InterpreterMacroAssembler::pop_i(Register r) {
 328   ldrw(r, post(esp, wordSize));
 329 }
 330 
 331 void InterpreterMacroAssembler::pop_l(Register r) {
 332   ldr(r, post(esp, 2 * Interpreter::stackElementSize));
 333 }
 334 
 335 void InterpreterMacroAssembler::push_ptr(Register r) {

 669 
 670     add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
 671     bind(entry);
 672     cmp(c_rarg1, r19); // check if bottom reached
 673     br(Assembler::NE, loop); // if not at bottom then check this entry
 674   }
 675 
 676   bind(no_unlock);
 677 
 678   // jvmti support
 679   if (notify_jvmdi) {
 680     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
 681   } else {
 682     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
 683   }
 684 
 685   // remove activation
 686   // get sender esp
 687   ldr(rscratch2,
 688       Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
 689 
 690   if (StackReservedPages > 0) {
 691     // testing if reserved zone needs to be re-enabled
 692     Label no_reserved_zone_enabling;
 693 
 694     // check if already enabled - if so no re-enabling needed
 695     assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
 696     ldrw(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
 697     cmpw(rscratch1, (u1)StackOverflow::stack_guard_enabled);
 698     br(Assembler::EQ, no_reserved_zone_enabling);
 699 
 700     // look for an overflow into the stack reserved zone, i.e.
 701     // interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
 702     ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
 703     cmp(rscratch2, rscratch1);
 704     br(Assembler::LS, no_reserved_zone_enabling);
 705 
 706     call_VM_leaf(
 707       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
 708     call_VM(noreg, CAST_FROM_FN_PTR(address,
 709                    InterpreterRuntime::throw_delayed_StackOverflowError));
 710     should_not_reach_here();
 711 
 712     bind(no_reserved_zone_enabling);
 713   }
 714 
 715   if (state == atos && InlineTypeReturnedAsFields) {
 716     // Check if we are returning a non-null inline type and load its fields into registers
 717     Label skip;
 718     test_oop_is_not_inline_type(r0, rscratch2, skip);
 719 
 720     // Load fields from a buffered value with an inline class specific handler
 721     load_klass(rscratch1 /*dst*/, r0 /*src*/);
 722     ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 723     ldr(rscratch1, Address(rscratch1, InlineKlass::unpack_handler_offset()));
 724     // Unpack handler can be null if inline type is not scalarizable in returns
 725     cbz(rscratch1, skip);
 726 
 727     blr(rscratch1);
 728 #ifdef ASSERT
 729     // TODO 8284443 Enable
 730     if (StressCallingConvention && false) {
 731       Label skip_stress;
 732       ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
 733       ldrw(rscratch1, Address(rscratch1, Method::flags_offset()));
 734       tstw(rscratch1, MethodFlags::has_scalarized_return_flag());
 735       br(Assembler::EQ, skip_stress);
 736       load_klass(r0, r0);
 737       orr(r0, r0, 1);
 738       bind(skip_stress);
 739     }
 740 #endif
 741     bind(skip);
 742     // Check above kills sender esp in rscratch2. Reload it.
 743     ldr(rscratch2, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
 744   }
 745 
 746   // restore sender esp
 747   mov(esp, rscratch2);
 748   // remove frame anchor
 749   leave();
 750   // If we're returning to interpreted code we will shortly be
 751   // adjusting SP to allow some space for ESP.  If we're returning to
 752   // compiled code the saved sender SP was saved in sender_sp, so this
 753   // restores it.
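  // sp must stay 16-byte aligned on AArch64, so the value taken from esp is
  // rounded down to a 16-byte boundary before it becomes the C stack pointer.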
 754   andr(sp, esp, -16);
 755 }
 756 
 757 // Lock object
 758 //
 759 // Args:
 760 //      c_rarg1: BasicObjectLock to be used for locking
 761 //
 762 // Kills:
 763 //      r0
 764 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, .. (param regs)
 765 //      rscratch1, rscratch2 (scratch regs)

 786 
 787     Label slow_case;
 788 
 789     // Load object pointer into obj_reg %c_rarg3
 790     ldr(obj_reg, Address(lock_reg, obj_offset));
 791 
 792     if (DiagnoseSyncOnValueBasedClasses != 0) {
 793       load_klass(tmp, obj_reg);
 794       ldrb(tmp, Address(tmp, Klass::misc_flags_offset()));
 795       tst(tmp, KlassFlags::_misc_is_value_based_class);
 796       br(Assembler::NE, slow_case);
 797     }
 798 
 799     if (LockingMode == LM_LIGHTWEIGHT) {
 800       lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
 801       b(done);
 802     } else if (LockingMode == LM_LEGACY) {
 803       // Load (object->mark() | 1) into swap_reg
 804       ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 805       orr(swap_reg, rscratch1, 1);
 806       if (EnableValhalla) {
 807         // Mask inline_type bit such that we go to the slow path if object is an inline type
 808         andr(swap_reg, swap_reg, ~((int) markWord::inline_type_bit_in_place));
 809       }
 810 
 811       // Save (object->mark() | 1) into BasicLock's displaced header
 812       str(swap_reg, Address(lock_reg, mark_offset));
 813 
 814       assert(lock_offset == 0,
 815              "displaced header must be first word in BasicObjectLock");
 816 
 817       Label fail;
 818       cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
 819 
 820       // Fast check for recursive lock.
 821       //
 822       // Can apply the optimization only if this is a stack lock
 823       // allocated in this thread. For efficiency, we can focus on
 824       // recently allocated stack locks (instead of reading the stack
 825       // base and checking whether 'mark' points inside the current
 826       // thread stack):
 827       //  1) (mark & 7) == 0, and
 828       //  2) sp <= mark < sp + os::pagesize()
 829       //
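      // A minimal sketch in plain C++ (not the HotSpot code, assuming a
      // 4 KiB page) of how both conditions can be folded into one mask test:
      //
      //   bool looks_like_recent_stack_lock(uintptr_t mark, uintptr_t sp) {
      //     uintptr_t page_size = 4096;            // assumed os::vm_page_size()
      //     uintptr_t diff = mark - sp;            // wraps to a huge value if mark < sp
      //     return (diff & (7 - page_size)) == 0;  // low 3 bits and bits >= 12 clear
      //   }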

1143     Address data(mdp, in_bytes(JumpData::taken_offset()));
1144     ldr(bumped_count, data);
1145     assert(DataLayout::counter_increment == 1,
1146             "flow-free idiom only works with 1");
1147     // Intel does this to catch overflow
1148     // addptr(bumped_count, DataLayout::counter_increment);
1149     // sbbptr(bumped_count, 0);
1150     // so we do this
1151     adds(bumped_count, bumped_count, DataLayout::counter_increment);
1152     Label L;
1153     br(Assembler::CS, L);       // skip store if counter overflow
1154     str(bumped_count, data);
1155     bind(L);
1156     // The method data pointer needs to be updated to reflect the new target.
1157     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1158     bind(profile_continue);
1159   }
1160 }
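// A small sketch in plain C++ (not the HotSpot code) of the saturating
// increment implemented by the adds/br(CS)/str sequence above: if the add
// carries out (unsigned overflow), the store is skipped and the counter
// stays pinned at its maximum instead of wrapping to zero.
//
//   uint64_t bump_taken_count(uint64_t count) {
//     uint64_t bumped = count + 1;      // adds bumped_count, bumped_count, 1
//     return bumped < count ? count     // carry set: skip the store
//                           : bumped;   // str bumped_count, data
//   }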
1161 
1162 
1163 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp, bool acmp) {
1164   if (ProfileInterpreter) {
1165     Label profile_continue;
1166 
1167     // If no method data exists, go to profile_continue.
1168     test_method_data_pointer(mdp, profile_continue);
1169 
1170     // We are not taking the branch.  Increment the not taken count.
1171     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
1172 
1173     // The method data pointer needs to be updated to correspond to
1174     // the next bytecode
1175     update_mdp_by_constant(mdp, acmp ? in_bytes(ACmpData::acmp_data_size()) : in_bytes(BranchData::branch_data_size()));
1176     bind(profile_continue);
1177   }
1178 }
1179 
1180 
1181 void InterpreterMacroAssembler::profile_call(Register mdp) {
1182   if (ProfileInterpreter) {
1183     Label profile_continue;
1184 
1185     // If no method data exists, go to profile_continue.
1186     test_method_data_pointer(mdp, profile_continue);
1187 
1188     // We are making a call.  Increment the count.
1189     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1190 
1191     // The method data pointer needs to be updated to reflect the new target.
1192     update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1193     bind(profile_continue);
1194   }
1195 }

1478     // case_array_offset_in_bytes()
1479     movw(reg2, in_bytes(MultiBranchData::per_case_size()));
1480     movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
1481     Assembler::maddw(index, index, reg2, rscratch1);
1482 
1483     // Update the case count
1484     increment_mdp_data_at(mdp,
1485                           index,
1486                           in_bytes(MultiBranchData::relative_count_offset()));
1487 
1488     // The method data pointer needs to be updated.
1489     update_mdp_by_offset(mdp,
1490                          index,
1491                          in_bytes(MultiBranchData::
1492                                   relative_displacement_offset()));
1493 
1494     bind(profile_continue);
1495   }
1496 }
1497 
1498 template <class ArrayData> void InterpreterMacroAssembler::profile_array_type(Register mdp,
1499                                                                               Register array,
1500                                                                               Register tmp) {
1501   if (ProfileInterpreter) {
1502     Label profile_continue;
1503 
1504     // If no method data exists, go to profile_continue.
1505     test_method_data_pointer(mdp, profile_continue);
1506 
1507     mov(tmp, array);
1508     profile_obj_type(tmp, Address(mdp, in_bytes(ArrayData::array_offset())));
1509 
1510     Label not_flat;
1511     test_non_flat_array_oop(array, tmp, not_flat);
1512 
1513     set_mdp_flag_at(mdp, ArrayData::flat_array_byte_constant());
1514 
1515     bind(not_flat);
1516 
1517     Label not_null_free;
1518     test_non_null_free_array_oop(array, tmp, not_null_free);
1519 
1520     set_mdp_flag_at(mdp, ArrayData::null_free_array_byte_constant());
1521 
1522     bind(not_null_free);
1523 
1524     bind(profile_continue);
1525   }
1526 }
1527 
1528 template void InterpreterMacroAssembler::profile_array_type<ArrayLoadData>(Register mdp,
1529                                                                            Register array,
1530                                                                            Register tmp);
1531 template void InterpreterMacroAssembler::profile_array_type<ArrayStoreData>(Register mdp,
1532                                                                             Register array,
1533                                                                             Register tmp);
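// The explicit instantiations above are needed because the template body is
// defined in this translation unit; ArrayLoadData and ArrayStoreData are the
// two profile kinds used by the array load/store bytecodes.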
1534 
1535 void InterpreterMacroAssembler::profile_multiple_element_types(Register mdp, Register element, Register tmp, const Register tmp2) {
1536   if (ProfileInterpreter) {
1537     Label profile_continue;
1538 
1539     // If no method data exists, go to profile_continue.
1540     test_method_data_pointer(mdp, profile_continue);
1541 
1542     Label done, update;
1543     cbnz(element, update);
1544     set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());
1545     b(done);
1546 
1547     bind(update);
1548     load_klass(tmp, element);
1549 
1550     // Record the object type.
1551     record_klass_in_profile(tmp, mdp, tmp2);
1552 
1553     bind(done);
1554 
1555     // The method data pointer needs to be updated.
1556     update_mdp_by_constant(mdp, in_bytes(ArrayStoreData::array_store_data_size()));
1557 
1558     bind(profile_continue);
1559   }
1560 }
1561 
1562 
1563 void InterpreterMacroAssembler::profile_element_type(Register mdp,
1564                                                      Register element,
1565                                                      Register tmp) {
1566   if (ProfileInterpreter) {
1567     Label profile_continue;
1568 
1569     // If no method data exists, go to profile_continue.
1570     test_method_data_pointer(mdp, profile_continue);
1571 
1572     mov(tmp, element);
1573     profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadData::element_offset())));
1574 
1575     // The method data pointer needs to be updated.
1576     update_mdp_by_constant(mdp, in_bytes(ArrayLoadData::array_load_data_size()));
1577 
1578     bind(profile_continue);
1579   }
1580 }
1581 
1582 void InterpreterMacroAssembler::profile_acmp(Register mdp,
1583                                              Register left,
1584                                              Register right,
1585                                              Register tmp) {
1586   if (ProfileInterpreter) {
1587     Label profile_continue;
1588 
1589     // If no method data exists, go to profile_continue.
1590     test_method_data_pointer(mdp, profile_continue);
1591 
1592     mov(tmp, left);
1593     profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::left_offset())));
1594 
1595     Label left_not_inline_type;
1596     test_oop_is_not_inline_type(left, tmp, left_not_inline_type);
1597     set_mdp_flag_at(mdp, ACmpData::left_inline_type_byte_constant());
1598     bind(left_not_inline_type);
1599 
1600     mov(tmp, right);
1601     profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::right_offset())));
1602 
1603     Label right_not_inline_type;
1604     test_oop_is_not_inline_type(right, tmp, right_not_inline_type);
1605     set_mdp_flag_at(mdp, ACmpData::right_inline_type_byte_constant());
1606     bind(right_not_inline_type);
1607 
1608     bind(profile_continue);
1609   }
1610 }
1611 
1612 void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
1613   if (state == atos) {
1614     MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
1615   }
1616 }
1617 
1618 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }
1619 
1620 
1621 void InterpreterMacroAssembler::notify_method_entry() {
1622   // Whenever JVMTI is in interp_only_mode, method entry/exit events are sent to
1623   // track stack depth.  If it is possible to enter interp_only_mode we add
1624   // the code to check if the event should be sent.
1625   if (JvmtiExport::can_post_interpreter_events()) {
1626     Label L;
1627     ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1628     cbzw(r3, L);
1629     call_VM(noreg, CAST_FROM_FN_PTR(address,
1630                                     InterpreterRuntime::post_method_entry));
1631     bind(L);

1893         profile_obj_type(tmp, mdo_arg_addr);
1894 
1895         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1896         off_to_args += to_add;
1897       }
1898 
1899       if (MethodData::profile_return()) {
1900         ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
1901         sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
1902       }
1903 
1904       add(rscratch1, mdp, off_to_args);
1905       bind(done);
1906       mov(mdp, rscratch1);
1907 
1908       if (MethodData::profile_return()) {
1909         // We're right after the type profile for the last
1910         // argument. tmp is the number of cells left in the
1911         // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
1912         // if there's a return to profile.
1913         assert(SingleTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
1914         add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
1915       }
1916       str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
1917     } else {
1918       assert(MethodData::profile_return(), "either profile call args or call ret");
1919       update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
1920     }
1921 
1922     // mdp points right after the end of the
1923     // CallTypeData/VirtualCallTypeData, right after the cells for the
1924     // return value type if there's one
1925 
1926     bind(profile_continue);
1927   }
1928 }
1929 
1930 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
1931   assert_different_registers(mdp, ret, tmp, rbcp);
1932   if (ProfileInterpreter && MethodData::profile_return()) {
1933     Label profile_continue, done;

1939 
1940       // If we don't profile all invoke bytecodes we must make sure
1941       // it's a bytecode we indeed profile. We can't go back to the
1942       // beginning of the ProfileData we intend to update to check its
1943       // type because we're right after it and we don't know its
1944       // length
1945       Label do_profile;
1946       ldrb(rscratch1, Address(rbcp, 0));
1947       cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
1948       br(Assembler::EQ, do_profile);
1949       cmp(rscratch1, (u1)Bytecodes::_invokehandle);
1950       br(Assembler::EQ, do_profile);
1951       get_method(tmp);
1952       ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset()));
1953       subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1954       br(Assembler::NE, profile_continue);
1955 
1956       bind(do_profile);
1957     }
1958 
1959     Address mdo_ret_addr(mdp, -in_bytes(SingleTypeEntry::size()));
1960     mov(tmp, ret);
1961     profile_obj_type(tmp, mdo_ret_addr);
1962 
1963     bind(profile_continue);
1964   }
1965 }
1966 
1967 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
1968   assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
1969   if (ProfileInterpreter && MethodData::profile_parameters()) {
1970     Label profile_continue, done;
1971 
1972     test_method_data_pointer(mdp, profile_continue);
1973 
1974     // Load the offset of the area within the MDO used for
1975     // parameters. If it's negative we're not profiling any parameters
1976     ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
1977     tbnz(tmp1, 31, profile_continue);  // i.e. sign bit set
1978 
1979     // Compute a pointer to the area for parameters from the offset