< prev index next >

src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp

Print this page

  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "compiler/compiler_globals.hpp"
  29 #include "gc/shared/barrierSet.hpp"
  30 #include "gc/shared/barrierSetAssembler.hpp"
  31 #include "interp_masm_aarch64.hpp"
  32 #include "interpreter/interpreter.hpp"
  33 #include "interpreter/interpreterRuntime.hpp"
  34 #include "logging/log.hpp"
  35 #include "oops/arrayOop.hpp"
  36 #include "oops/markWord.hpp"
  37 #include "oops/method.hpp"
  38 #include "oops/methodData.hpp"

  39 #include "prims/jvmtiExport.hpp"
  40 #include "prims/jvmtiThreadState.hpp"
  41 #include "runtime/basicLock.hpp"
  42 #include "runtime/frame.inline.hpp"
  43 #include "runtime/javaThread.hpp"
  44 #include "runtime/safepointMechanism.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 #include "utilities/powerOfTwo.hpp"
  47 
  48 void InterpreterMacroAssembler::narrow(Register result) {
  49 
  50   // Get method->_constMethod->_result_type
  51   ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  52   ldr(rscratch1, Address(rscratch1, Method::const_offset()));
  53   ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
  54 
  55   Label done, notBool, notByte, notChar;
  56 
  57   // common case first
  58   cmpw(rscratch1, T_INT);

 252   // and from word offset to byte offset
 253   assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
 254   ldr(cache, Address(rfp, frame::interpreter_frame_cache_offset * wordSize));
 255   // skip past the header
 256   add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
 257   add(cache, cache, tmp, Assembler::LSL, 2 + LogBytesPerWord);  // construct pointer to cache entry
 258 }
 259 
// Load the MethodCounters of 'method' into 'mcs', allocating them via the
// runtime on first use.  If allocation fails (OutOfMemory), branches to
// 'skip' so the caller can bypass counter updates; otherwise falls through
// with a valid MethodCounters* in 'mcs'.
void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbnz(mcs, has_counters);                 // fast path: counters already exist
  // Slow path: ask the runtime to allocate the MethodCounters.
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  // Reload after the VM call; 'mcs' was clobbered across call_VM.
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}
 271 































































// Load object from cpool->resolved_references(index)
// Result oop ends up in 'result'; 'tmp' and rscratch2 are scratch.
// NOTE: 'index' is clobbered (advanced by the objArray header offset).
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index, Register tmp) {
  assert_different_registers(result, index);

  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  ldr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
  ldr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
  // The resolved_references field is an OopHandle; resolve it to the array oop.
  resolve_oop_handle(result, tmp, rscratch2);
  // Add in the index (expressed in heap-oop-sized elements so the scaled
  // addressing mode below covers both the header and the element offset).
  add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)));
}
 286 
 287 void InterpreterMacroAssembler::load_resolved_klass_at_offset(
 288                              Register cpool, Register index, Register klass, Register temp) {
 289   add(temp, cpool, index, LSL, LogBytesPerWord);
 290   ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
 291   ldr(klass, Address(cpool,  ConstantPool::resolved_klasses_offset_in_bytes())); // klass = cpool->_resolved_klasses

 298                                                               Register cache) {
 299   const int method_offset = in_bytes(
 300     ConstantPoolCache::base_offset() +
 301       ((byte_no == TemplateTable::f2_byte)
 302        ? ConstantPoolCacheEntry::f2_offset()
 303        : ConstantPoolCacheEntry::f1_offset()));
 304 
 305   ldr(method, Address(cache, method_offset)); // get f1 Method*
 306 }
 307 
// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
// On failure the code falls through (no branch), after recording the
// failed check in the MDO.
//
// Args:
//      r0: superklass
//      Rsub_klass: subklass
//
// Kills:
//      r2, r5
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype) {
  assert(Rsub_klass != r0, "r0 holds superklass");
  assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
  assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");

  // Profile the not-null value's klass.
  profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5

  // Do the check.
  check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2

  // Profile the failure of the check.
  profile_typecheck_failed(r2); // blows r2
}
 332 
 333 // Java Expression Stack
 334 
// Pop a reference (one machine word) off the Java expression stack.
void InterpreterMacroAssembler::pop_ptr(Register r) {
  ldr(r, post(esp, wordSize));
}

// Pop a 32-bit int; each stack element still occupies a full word.
void InterpreterMacroAssembler::pop_i(Register r) {
  ldrw(r, post(esp, wordSize));
}

// Pop a long: one 64-bit load from top-of-stack, then advance esp past
// the two stack elements a long occupies.
void InterpreterMacroAssembler::pop_l(Register r) {
  ldr(r, post(esp, 2 * Interpreter::stackElementSize));
}

// Push a reference onto the Java expression stack (pre-decrement esp).
void InterpreterMacroAssembler::push_ptr(Register r) {
  str(r, pre(esp, -wordSize));
}
 350 

 671 
 672     add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
 673     bind(entry);
 674     cmp(c_rarg1, r19); // check if bottom reached
 675     br(Assembler::NE, loop); // if not at bottom then check this entry
 676   }
 677 
 678   bind(no_unlock);
 679 
 680   // jvmti support
 681   if (notify_jvmdi) {
 682     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
 683   } else {
 684     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
 685   }
 686 
 687   // remove activation
 688   // get sender esp
 689   ldr(rscratch2,
 690       Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));

 691   if (StackReservedPages > 0) {
 692     // testing if reserved zone needs to be re-enabled
 693     Label no_reserved_zone_enabling;
 694 
 695     // look for an overflow into the stack reserved zone, i.e.
 696     // interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
 697     ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
 698     cmp(rscratch2, rscratch1);
 699     br(Assembler::LS, no_reserved_zone_enabling);
 700 
 701     call_VM_leaf(
 702       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
 703     call_VM(noreg, CAST_FROM_FN_PTR(address,
 704                    InterpreterRuntime::throw_delayed_StackOverflowError));
 705     should_not_reach_here();
 706 
 707     bind(no_reserved_zone_enabling);
 708   }
 709 

































 710   // restore sender esp
 711   mov(esp, rscratch2);
 712   // remove frame anchor
 713   leave();
 714   // If we're returning to interpreted code we will shortly be
 715   // adjusting SP to allow some space for ESP.  If we're returning to
 716   // compiled code the saved sender SP was saved in sender_sp, so this
 717   // restores it.
 718   andr(sp, esp, -16);
 719 }
 720 
 721 // Lock object
 722 //
 723 // Args:
 724 //      c_rarg1: BasicObjectLock to be used for locking
 725 //
 726 // Kills:
 727 //      r0
 728 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
 729 //      rscratch1, rscratch2 (scratch regs)

 744     const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
 745     const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
 746     const int mark_offset = lock_offset +
 747                             BasicLock::displaced_header_offset_in_bytes();
 748 
 749     Label slow_case;
 750 
 751     // Load object pointer into obj_reg %c_rarg3
 752     ldr(obj_reg, Address(lock_reg, obj_offset));
 753 
 754     if (DiagnoseSyncOnValueBasedClasses != 0) {
 755       load_klass(tmp, obj_reg);
 756       ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 757       tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 758       br(Assembler::NE, slow_case);
 759     }
 760 
 761     // Load (object->mark() | 1) into swap_reg
 762     ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 763     orr(swap_reg, rscratch1, 1);




 764 
 765     // Save (object->mark() | 1) into BasicLock's displaced header
 766     str(swap_reg, Address(lock_reg, mark_offset));
 767 
 768     assert(lock_offset == 0,
 769            "displached header must be first word in BasicObjectLock");
 770 
 771     Label fail;
 772     cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/NULL);
 773 
 774     // Fast check for recursive lock.
 775     //
 776     // Can apply the optimization only if this is a stack lock
 777     // allocated in this thread. For efficiency, we can focus on
 778     // recently allocated stack locks (instead of reading the stack
 779     // base and checking whether 'mark' points inside the current
 780     // thread stack):
 781     //  1) (mark & 7) == 0, and
 782     //  2) sp <= mark < mark + os::pagesize()
 783     //

1088     Address data(mdp, in_bytes(JumpData::taken_offset()));
1089     ldr(bumped_count, data);
1090     assert(DataLayout::counter_increment == 1,
1091             "flow-free idiom only works with 1");
1092     // Intel does this to catch overflow
1093     // addptr(bumped_count, DataLayout::counter_increment);
1094     // sbbptr(bumped_count, 0);
1095     // so we do this
1096     adds(bumped_count, bumped_count, DataLayout::counter_increment);
1097     Label L;
1098     br(Assembler::CS, L);       // skip store if counter overflow
1099     str(bumped_count, data);
1100     bind(L);
1101     // The method data pointer needs to be updated to reflect the new target.
1102     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1103     bind(profile_continue);
1104   }
1105 }
1106 
1107 
// Record an untaken conditional branch in the MDO and advance the method
// data pointer past this bytecode's BranchData cell.  No-op unless the
// interpreter is profiling.
void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are not taking this branch.  Increment the not taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to
    // the next bytecode
    update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}
1124 
1125 
// Record a (non-virtual) call at the current bci in the MDO and advance
// the method data pointer past the CounterData cell.  No-op unless the
// interpreter is profiling.
void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}

1464     // case_array_offset_in_bytes()
1465     movw(reg2, in_bytes(MultiBranchData::per_case_size()));
1466     movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
1467     Assembler::maddw(index, index, reg2, rscratch1);
1468 
1469     // Update the case count
1470     increment_mdp_data_at(mdp,
1471                           index,
1472                           in_bytes(MultiBranchData::relative_count_offset()));
1473 
1474     // The method data pointer needs to be updated.
1475     update_mdp_by_offset(mdp,
1476                          index,
1477                          in_bytes(MultiBranchData::
1478                                   relative_displacement_offset()));
1479 
1480     bind(profile_continue);
1481   }
1482 }
1483 















































































// Interpreter-specific oop verification: only the atos TOS state carries
// an oop in 'reg', so all other states are ignored.
void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
  if (state == atos) {
    MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
  }
}
1489 
// Intentionally empty on AArch64: there is no FPU stack state to verify
// (unlike x86's x87 register stack).
void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }
1491 
1492 
1493 void InterpreterMacroAssembler::notify_method_entry() {
1494   // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1495   // track stack depth.  If it is possible to enter interp_only_mode we add
1496   // the code to check if the event should be sent.
1497   if (JvmtiExport::can_post_interpreter_events()) {
1498     Label L;
1499     ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1500     cbzw(r3, L);
1501     call_VM(noreg, CAST_FROM_FN_PTR(address,
1502                                     InterpreterRuntime::post_method_entry));
1503     bind(L);

1714         profile_obj_type(tmp, mdo_arg_addr);
1715 
1716         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1717         off_to_args += to_add;
1718       }
1719 
1720       if (MethodData::profile_return()) {
1721         ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
1722         sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
1723       }
1724 
1725       add(rscratch1, mdp, off_to_args);
1726       bind(done);
1727       mov(mdp, rscratch1);
1728 
1729       if (MethodData::profile_return()) {
1730         // We're right after the type profile for the last
1731         // argument. tmp is the number of cells left in the
1732         // CallTypeData/VirtualCallTypeData to reach its end. Non null
1733         // if there's a return to profile.
1734         assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
1735         add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
1736       }
1737       str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
1738     } else {
1739       assert(MethodData::profile_return(), "either profile call args or call ret");
1740       update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
1741     }
1742 
1743     // mdp points right after the end of the
1744     // CallTypeData/VirtualCallTypeData, right after the cells for the
1745     // return value type if there's one
1746 
1747     bind(profile_continue);
1748   }
1749 }
1750 
1751 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
1752   assert_different_registers(mdp, ret, tmp, rbcp);
1753   if (ProfileInterpreter && MethodData::profile_return()) {
1754     Label profile_continue, done;

1760 
1761       // If we don't profile all invoke bytecodes we must make sure
1762       // it's a bytecode we indeed profile. We can't go back to the
1763       // beginning of the ProfileData we intend to update to check its
1764       // type because we're right after it and we don't known its
1765       // length
1766       Label do_profile;
1767       ldrb(rscratch1, Address(rbcp, 0));
1768       cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
1769       br(Assembler::EQ, do_profile);
1770       cmp(rscratch1, (u1)Bytecodes::_invokehandle);
1771       br(Assembler::EQ, do_profile);
1772       get_method(tmp);
1773       ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset_in_bytes()));
1774       subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1775       br(Assembler::NE, profile_continue);
1776 
1777       bind(do_profile);
1778     }
1779 
1780     Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
1781     mov(tmp, ret);
1782     profile_obj_type(tmp, mdo_ret_addr);
1783 
1784     bind(profile_continue);
1785   }
1786 }
1787 
1788 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
1789   assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
1790   if (ProfileInterpreter && MethodData::profile_parameters()) {
1791     Label profile_continue, done;
1792 
1793     test_method_data_pointer(mdp, profile_continue);
1794 
1795     // Load the offset of the area within the MDO used for
1796     // parameters. If it's negative we're not profiling any parameters
1797     ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
1798     tbnz(tmp1, 31, profile_continue);  // i.e. sign bit set
1799 
1800     // Compute a pointer to the area for parameters from the offset

  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "compiler/compiler_globals.hpp"
  29 #include "gc/shared/barrierSet.hpp"
  30 #include "gc/shared/barrierSetAssembler.hpp"
  31 #include "interp_masm_aarch64.hpp"
  32 #include "interpreter/interpreter.hpp"
  33 #include "interpreter/interpreterRuntime.hpp"
  34 #include "logging/log.hpp"
  35 #include "oops/arrayOop.hpp"
  36 #include "oops/markWord.hpp"
  37 #include "oops/method.hpp"
  38 #include "oops/methodData.hpp"
  39 #include "oops/inlineKlass.hpp"
  40 #include "prims/jvmtiExport.hpp"
  41 #include "prims/jvmtiThreadState.hpp"
  42 #include "runtime/basicLock.hpp"
  43 #include "runtime/frame.inline.hpp"
  44 #include "runtime/javaThread.hpp"
  45 #include "runtime/safepointMechanism.hpp"
  46 #include "runtime/sharedRuntime.hpp"
  47 #include "utilities/powerOfTwo.hpp"
  48 
  49 void InterpreterMacroAssembler::narrow(Register result) {
  50 
  51   // Get method->_constMethod->_result_type
  52   ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  53   ldr(rscratch1, Address(rscratch1, Method::const_offset()));
  54   ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
  55 
  56   Label done, notBool, notByte, notChar;
  57 
  58   // common case first
  59   cmpw(rscratch1, T_INT);

 253   // and from word offset to byte offset
 254   assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
 255   ldr(cache, Address(rfp, frame::interpreter_frame_cache_offset * wordSize));
 256   // skip past the header
 257   add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
 258   add(cache, cache, tmp, Assembler::LSL, 2 + LogBytesPerWord);  // construct pointer to cache entry
 259 }
 260 
// Load the MethodCounters of 'method' into 'mcs', allocating them via the
// runtime on first use.  If allocation fails (OutOfMemory), branches to
// 'skip' so the caller can bypass counter updates; otherwise falls through
// with a valid MethodCounters* in 'mcs'.
void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbnz(mcs, has_counters);                 // fast path: counters already exist
  // Slow path: ask the runtime to allocate the MethodCounters.
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  // Reload after the VM call; 'mcs' was clobbered across call_VM.
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}
 272 
// Allocate an instance of 'klass' into 'new_obj' (t1/t2 are temps;
// 'clear_fields' is forwarded to MacroAssembler::allocate_instance),
// branching to 'alloc_failed' if fast-path allocation is not possible.
// On success, fires the dtrace object-alloc probe when DTraceAllocProbes
// is enabled; atos is pushed/popped to preserve the TOS state across the
// leaf call.
void InterpreterMacroAssembler::allocate_instance(Register klass, Register new_obj,
                                                  Register t1, Register t2,
                                                  bool clear_fields, Label& alloc_failed) {
  MacroAssembler::allocate_instance(klass, new_obj, t1, t2, clear_fields, alloc_failed);
  {
    SkipIfEqual skip_if(this, &DTraceAllocProbes, 0);
    // Trigger dtrace event for fastpath
    push(atos);
    call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), new_obj);
    pop(atos);
  }
}
 285 
// Read a flattened (inlined) field out of the holder object in 'obj' and
// buffer it on the heap; on return 'obj' holds the buffered value oop.
//
// Fast path: allocate a buffer instance of the field's inline klass and
// copy the flattened payload into it.  If the field klass is empty, the
// pre-allocated empty-value oop is used instead.  If allocation fails,
// falls back to the InterpreterRuntime::read_inlined_field VM call.
//
// 'holder_klass', 'field_index', 'field_offset' and 'temp' are all used
// as scratch; rscratch1 is clobbered too.
void InterpreterMacroAssembler::read_inlined_field(Register holder_klass,
                                                   Register field_index, Register field_offset,
                                                   Register temp, Register obj) {
  Label alloc_failed, empty_value, done;
  const Register src = field_offset;
  const Register alloc_temp = rscratch1;
  const Register dst_temp   = temp;
  assert_different_registers(obj, holder_klass, field_index, field_offset, dst_temp);

  // Grab the inline field klass.
  // holder_klass is saved on the stack so it can be rematerialized on
  // every exit path after being reused as 'field_klass' below.
  push(holder_klass);
  const Register field_klass = holder_klass;
  get_inline_type_field_klass(holder_klass, field_index, field_klass);

  // check for empty value klass
  test_klass_is_empty_inline_type(field_klass, dst_temp, empty_value);

  // allocate buffer
  push(obj); // save holder
  allocate_instance(field_klass, obj, alloc_temp, dst_temp, false, alloc_failed);

  // Have an oop instance buffer, copy into it
  data_for_oop(obj, dst_temp, field_klass);
  pop(alloc_temp);             // restore holder
  lea(src, Address(alloc_temp, field_offset));
  // call_VM_leaf, clobbers a few regs, save restore new obj
  push(obj);
  access_value_copy(IS_DEST_UNINITIALIZED, src, dst_temp, field_klass);
  pop(obj);
  pop(holder_klass);
  b(done);

  // Empty inline klass: no payload to copy, use the canonical empty oop.
  bind(empty_value);
  get_empty_inline_type_oop(field_klass, dst_temp, obj);
  pop(holder_klass);
  b(done);

  // Slow path: buffer allocation failed; let the runtime do the read.
  bind(alloc_failed);
  pop(obj);
  pop(holder_klass);
  call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_inlined_field),
          obj, field_index, holder_klass);

  bind(done);

  // Ensure the stores to copy the inline field contents are visible
  // before any subsequent store that publishes this reference.
  membar(Assembler::StoreStore);
}
 335 
// Load object from cpool->resolved_references(index)
// Result oop ends up in 'result'; 'tmp' and rscratch2 are scratch.
// NOTE: 'index' is clobbered (advanced by the objArray header offset).
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index, Register tmp) {
  assert_different_registers(result, index);

  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  ldr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
  ldr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
  // The resolved_references field is an OopHandle; resolve it to the array oop.
  resolve_oop_handle(result, tmp, rscratch2);
  // Add in the index (expressed in heap-oop-sized elements so the scaled
  // addressing mode below covers both the header and the element offset).
  add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)));
}
 350 
 351 void InterpreterMacroAssembler::load_resolved_klass_at_offset(
 352                              Register cpool, Register index, Register klass, Register temp) {
 353   add(temp, cpool, index, LSL, LogBytesPerWord);
 354   ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
 355   ldr(klass, Address(cpool,  ConstantPool::resolved_klasses_offset_in_bytes())); // klass = cpool->_resolved_klasses

 362                                                               Register cache) {
 363   const int method_offset = in_bytes(
 364     ConstantPoolCache::base_offset() +
 365       ((byte_no == TemplateTable::f2_byte)
 366        ? ConstantPoolCacheEntry::f2_offset()
 367        : ConstantPoolCacheEntry::f1_offset()));
 368 
 369   ldr(method, Address(cache, method_offset)); // get f1 Method*
 370 }
 371 
// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.  When 'profile' is false, all MDO type-profile
// updates (success and failure) are suppressed.
//
// On failure the code falls through (no branch).
//
// Args:
//      r0: superklass
//      Rsub_klass: subklass
//
// Kills:
//      r2, r5
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype,
                                                  bool profile) {
  assert(Rsub_klass != r0, "r0 holds superklass");
  assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
  assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");

  // Profile the not-null value's klass.
  if (profile) {
    profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5
  }

  // Do the check.
  check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2

  // Profile the failure of the check.
  if (profile) {
    profile_typecheck_failed(r2); // blows r2
  }
}
 401 
 402 // Java Expression Stack
 403 
// Pop a reference (one machine word) off the Java expression stack.
void InterpreterMacroAssembler::pop_ptr(Register r) {
  ldr(r, post(esp, wordSize));
}

// Pop a 32-bit int; each stack element still occupies a full word.
void InterpreterMacroAssembler::pop_i(Register r) {
  ldrw(r, post(esp, wordSize));
}

// Pop a long: one 64-bit load from top-of-stack, then advance esp past
// the two stack elements a long occupies.
void InterpreterMacroAssembler::pop_l(Register r) {
  ldr(r, post(esp, 2 * Interpreter::stackElementSize));
}

// Push a reference onto the Java expression stack (pre-decrement esp).
void InterpreterMacroAssembler::push_ptr(Register r) {
  str(r, pre(esp, -wordSize));
}
 419 

 740 
 741     add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
 742     bind(entry);
 743     cmp(c_rarg1, r19); // check if bottom reached
 744     br(Assembler::NE, loop); // if not at bottom then check this entry
 745   }
 746 
 747   bind(no_unlock);
 748 
 749   // jvmti support
 750   if (notify_jvmdi) {
 751     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
 752   } else {
 753     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
 754   }
 755 
 756   // remove activation
 757   // get sender esp
 758   ldr(rscratch2,
 759       Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
 760 
 761   if (StackReservedPages > 0) {
 762     // testing if reserved zone needs to be re-enabled
 763     Label no_reserved_zone_enabling;
 764 
 765     // look for an overflow into the stack reserved zone, i.e.
 766     // interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
 767     ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
 768     cmp(rscratch2, rscratch1);
 769     br(Assembler::LS, no_reserved_zone_enabling);
 770 
 771     call_VM_leaf(
 772       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
 773     call_VM(noreg, CAST_FROM_FN_PTR(address,
 774                    InterpreterRuntime::throw_delayed_StackOverflowError));
 775     should_not_reach_here();
 776 
 777     bind(no_reserved_zone_enabling);
 778   }
 779 
 780 
 781   if (state == atos && InlineTypeReturnedAsFields) {
 782     // Check if we are returning an non-null inline type and load its fields into registers
 783     Label skip;
 784     test_oop_is_not_inline_type(r0, rscratch2, skip);
 785 
 786     // Load fields from a buffered value with an inline class specific handler
 787     load_klass(rscratch1 /*dst*/, r0 /*src*/);
 788     ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 789     ldr(rscratch1, Address(rscratch1, InlineKlass::unpack_handler_offset()));
 790     // Unpack handler can be null if inline type is not scalarizable in returns
 791     cbz(rscratch1, skip);
 792 
 793     blr(rscratch1);
 794 #ifdef ASSERT
 795     if (StressInlineTypeReturnedAsFields) {
 796       // TODO 8284443 Enable this for value class returns (L-type descriptor)
 797       Label skip_stress;
 798       ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
 799       ldr(rscratch1, Address(rscratch1, Method::const_offset()));
 800       ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
 801       cmpw(rscratch1, (u1) T_PRIMITIVE_OBJECT);
 802       br(Assembler::NE, skip_stress);
 803       load_klass(r0, r0);
 804       orr(r0, r0, 1);
 805       bind(skip_stress);
 806     }
 807 #endif
 808     bind(skip);
 809     // Check above kills sender esp in rscratch2. Reload it.
 810     ldr(rscratch2, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
 811   }
 812 
 813   // restore sender esp
 814   mov(esp, rscratch2);
 815   // remove frame anchor
 816   leave();
 817   // If we're returning to interpreted code we will shortly be
 818   // adjusting SP to allow some space for ESP.  If we're returning to
 819   // compiled code the saved sender SP was saved in sender_sp, so this
 820   // restores it.
 821   andr(sp, esp, -16);
 822 }
 823 
 824 // Lock object
 825 //
 826 // Args:
 827 //      c_rarg1: BasicObjectLock to be used for locking
 828 //
 829 // Kills:
 830 //      r0
 831 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
 832 //      rscratch1, rscratch2 (scratch regs)

 847     const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
 848     const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
 849     const int mark_offset = lock_offset +
 850                             BasicLock::displaced_header_offset_in_bytes();
 851 
 852     Label slow_case;
 853 
 854     // Load object pointer into obj_reg %c_rarg3
 855     ldr(obj_reg, Address(lock_reg, obj_offset));
 856 
 857     if (DiagnoseSyncOnValueBasedClasses != 0) {
 858       load_klass(tmp, obj_reg);
 859       ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 860       tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 861       br(Assembler::NE, slow_case);
 862     }
 863 
 864     // Load (object->mark() | 1) into swap_reg
 865     ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 866     orr(swap_reg, rscratch1, 1);
 867     if (EnableValhalla) {
 868       // Mask inline_type bit such that we go to the slow path if object is an inline type
 869       andr(swap_reg, swap_reg, ~((int) markWord::inline_type_bit_in_place));
 870     }
 871 
 872     // Save (object->mark() | 1) into BasicLock's displaced header
 873     str(swap_reg, Address(lock_reg, mark_offset));
 874 
 875     assert(lock_offset == 0,
 876            "displached header must be first word in BasicObjectLock");
 877 
 878     Label fail;
 879     cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/NULL);
 880 
 881     // Fast check for recursive lock.
 882     //
 883     // Can apply the optimization only if this is a stack lock
 884     // allocated in this thread. For efficiency, we can focus on
 885     // recently allocated stack locks (instead of reading the stack
 886     // base and checking whether 'mark' points inside the current
 887     // thread stack):
 888     //  1) (mark & 7) == 0, and
 889     //  2) sp <= mark < mark + os::pagesize()
 890     //

1195     Address data(mdp, in_bytes(JumpData::taken_offset()));
1196     ldr(bumped_count, data);
1197     assert(DataLayout::counter_increment == 1,
1198             "flow-free idiom only works with 1");
1199     // Intel does this to catch overflow
1200     // addptr(bumped_count, DataLayout::counter_increment);
1201     // sbbptr(bumped_count, 0);
1202     // so we do this
1203     adds(bumped_count, bumped_count, DataLayout::counter_increment);
1204     Label L;
1205     br(Assembler::CS, L);       // skip store if counter overflow
1206     str(bumped_count, data);
1207     bind(L);
1208     // The method data pointer needs to be updated to reflect the new target.
1209     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1210     bind(profile_continue);
1211   }
1212 }
1213 
1214 
1215 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp, bool acmp) {
1216   if (ProfileInterpreter) {
1217     Label profile_continue;
1218 
1219     // If no method data exists, go to profile_continue.
1220     test_method_data_pointer(mdp, profile_continue);
1221 
1222     // We are taking a branch.  Increment the not taken count.
1223     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
1224 
1225     // The method data pointer needs to be updated to correspond to
1226     // the next bytecode
1227     update_mdp_by_constant(mdp, acmp ? in_bytes(ACmpData::acmp_data_size()) : in_bytes(BranchData::branch_data_size()));
1228     bind(profile_continue);
1229   }
1230 }
1231 
1232 
1233 void InterpreterMacroAssembler::profile_call(Register mdp) {
1234   if (ProfileInterpreter) {
1235     Label profile_continue;
1236 
1237     // If no method data exists, go to profile_continue.
1238     test_method_data_pointer(mdp, profile_continue);
1239 
1240     // We are making a call.  Increment the count.
1241     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1242 
1243     // The method data pointer needs to be updated to reflect the new target.
1244     update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1245     bind(profile_continue);
1246   }
1247 }

1571     // case_array_offset_in_bytes()
1572     movw(reg2, in_bytes(MultiBranchData::per_case_size()));
1573     movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
1574     Assembler::maddw(index, index, reg2, rscratch1);
1575 
1576     // Update the case count
1577     increment_mdp_data_at(mdp,
1578                           index,
1579                           in_bytes(MultiBranchData::relative_count_offset()));
1580 
1581     // The method data pointer needs to be updated.
1582     update_mdp_by_offset(mdp,
1583                          index,
1584                          in_bytes(MultiBranchData::
1585                                   relative_displacement_offset()));
1586 
1587     bind(profile_continue);
1588   }
1589 }
1590 
1591 void InterpreterMacroAssembler::profile_array(Register mdp,
1592                                               Register array,
1593                                               Register tmp) {
1594   if (ProfileInterpreter) {
1595     Label profile_continue;
1596 
1597     // If no method data exists, go to profile_continue.
1598     test_method_data_pointer(mdp, profile_continue);
1599 
1600     mov(tmp, array);
1601     profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadStoreData::array_offset())));
1602 
1603     Label not_flat;
1604     test_non_flattened_array_oop(array, tmp, not_flat);
1605 
1606     set_mdp_flag_at(mdp, ArrayLoadStoreData::flat_array_byte_constant());
1607 
1608     bind(not_flat);
1609 
1610     Label not_null_free;
1611     test_non_null_free_array_oop(array, tmp, not_null_free);
1612 
1613     set_mdp_flag_at(mdp, ArrayLoadStoreData::null_free_array_byte_constant());
1614 
1615     bind(not_null_free);
1616 
1617     bind(profile_continue);
1618   }
1619 }
1620 
1621 void InterpreterMacroAssembler::profile_element(Register mdp,
1622                                                 Register element,
1623                                                 Register tmp) {
1624   if (ProfileInterpreter) {
1625     Label profile_continue;
1626 
1627     // If no method data exists, go to profile_continue.
1628     test_method_data_pointer(mdp, profile_continue);
1629 
1630     mov(tmp, element);
1631     profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadStoreData::element_offset())));
1632 
1633     // The method data pointer needs to be updated.
1634     update_mdp_by_constant(mdp, in_bytes(ArrayLoadStoreData::array_load_store_data_size()));
1635 
1636     bind(profile_continue);
1637   }
1638 }
1639 
1640 void InterpreterMacroAssembler::profile_acmp(Register mdp,
1641                                              Register left,
1642                                              Register right,
1643                                              Register tmp) {
1644   if (ProfileInterpreter) {
1645     Label profile_continue;
1646 
1647     // If no method data exists, go to profile_continue.
1648     test_method_data_pointer(mdp, profile_continue);
1649 
1650     mov(tmp, left);
1651     profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::left_offset())));
1652 
1653     Label left_not_inline_type;
1654     test_oop_is_not_inline_type(left, tmp, left_not_inline_type);
1655     set_mdp_flag_at(mdp, ACmpData::left_inline_type_byte_constant());
1656     bind(left_not_inline_type);
1657 
1658     mov(tmp, right);
1659     profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::right_offset())));
1660 
1661     Label right_not_inline_type;
1662     test_oop_is_not_inline_type(right, tmp, right_not_inline_type);
1663     set_mdp_flag_at(mdp, ACmpData::right_inline_type_byte_constant());
1664     bind(right_not_inline_type);
1665 
1666     bind(profile_continue);
1667   }
1668 }
1669 
1670 void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
1671   if (state == atos) {
1672     MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
1673   }
1674 }
1675 
1676 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }
1677 
1678 
1679 void InterpreterMacroAssembler::notify_method_entry() {
1680   // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1681   // track stack depth.  If it is possible to enter interp_only_mode we add
1682   // the code to check if the event should be sent.
1683   if (JvmtiExport::can_post_interpreter_events()) {
1684     Label L;
1685     ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1686     cbzw(r3, L);
1687     call_VM(noreg, CAST_FROM_FN_PTR(address,
1688                                     InterpreterRuntime::post_method_entry));
1689     bind(L);

1900         profile_obj_type(tmp, mdo_arg_addr);
1901 
1902         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1903         off_to_args += to_add;
1904       }
1905 
1906       if (MethodData::profile_return()) {
1907         ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
1908         sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
1909       }
1910 
1911       add(rscratch1, mdp, off_to_args);
1912       bind(done);
1913       mov(mdp, rscratch1);
1914 
1915       if (MethodData::profile_return()) {
1916         // We're right after the type profile for the last
1917         // argument. tmp is the number of cells left in the
1918         // CallTypeData/VirtualCallTypeData to reach its end. Non null
1919         // if there's a return to profile.
1920         assert(SingleTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
1921         add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
1922       }
1923       str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
1924     } else {
1925       assert(MethodData::profile_return(), "either profile call args or call ret");
1926       update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
1927     }
1928 
1929     // mdp points right after the end of the
1930     // CallTypeData/VirtualCallTypeData, right after the cells for the
1931     // return value type if there's one
1932 
1933     bind(profile_continue);
1934   }
1935 }
1936 
1937 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
1938   assert_different_registers(mdp, ret, tmp, rbcp);
1939   if (ProfileInterpreter && MethodData::profile_return()) {
1940     Label profile_continue, done;

1946 
1947       // If we don't profile all invoke bytecodes we must make sure
1948       // it's a bytecode we indeed profile. We can't go back to the
1949       // beginning of the ProfileData we intend to update to check its
1950       // type because we're right after it and we don't known its
1951       // length
1952       Label do_profile;
1953       ldrb(rscratch1, Address(rbcp, 0));
1954       cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
1955       br(Assembler::EQ, do_profile);
1956       cmp(rscratch1, (u1)Bytecodes::_invokehandle);
1957       br(Assembler::EQ, do_profile);
1958       get_method(tmp);
1959       ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset_in_bytes()));
1960       subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1961       br(Assembler::NE, profile_continue);
1962 
1963       bind(do_profile);
1964     }
1965 
1966     Address mdo_ret_addr(mdp, -in_bytes(SingleTypeEntry::size()));
1967     mov(tmp, ret);
1968     profile_obj_type(tmp, mdo_ret_addr);
1969 
1970     bind(profile_continue);
1971   }
1972 }
1973 
1974 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
1975   assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
1976   if (ProfileInterpreter && MethodData::profile_parameters()) {
1977     Label profile_continue, done;
1978 
1979     test_method_data_pointer(mdp, profile_continue);
1980 
1981     // Load the offset of the area within the MDO used for
1982     // parameters. If it's negative we're not profiling any parameters
1983     ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
1984     tbnz(tmp1, 31, profile_continue);  // i.e. sign bit set
1985 
1986     // Compute a pointer to the area for parameters from the offset
< prev index next >