< prev index next >

src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp

Print this page

  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "compiler/compiler_globals.hpp"
  29 #include "gc/shared/barrierSet.hpp"
  30 #include "gc/shared/barrierSetAssembler.hpp"
  31 #include "interp_masm_aarch64.hpp"
  32 #include "interpreter/interpreter.hpp"
  33 #include "interpreter/interpreterRuntime.hpp"
  34 #include "logging/log.hpp"
  35 #include "oops/arrayOop.hpp"
  36 #include "oops/markWord.hpp"
  37 #include "oops/method.hpp"
  38 #include "oops/methodData.hpp"

  39 #include "prims/jvmtiExport.hpp"
  40 #include "prims/jvmtiThreadState.hpp"
  41 #include "runtime/basicLock.hpp"
  42 #include "runtime/frame.inline.hpp"
  43 #include "runtime/safepointMechanism.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/thread.inline.hpp"
  46 #include "utilities/powerOfTwo.hpp"
  47 
  48 void InterpreterMacroAssembler::narrow(Register result) {
  49 
  50   // Get method->_constMethod->_result_type
  51   ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  52   ldr(rscratch1, Address(rscratch1, Method::const_offset()));
  53   ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
  54 
  55   Label done, notBool, notByte, notChar;
  56 
  57   // common case first
  58   cmpw(rscratch1, T_INT);

 247   // and from word offset to byte offset
 248   assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
 249   ldr(cache, Address(rfp, frame::interpreter_frame_cache_offset * wordSize));
 250   // skip past the header
 251   add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
 252   add(cache, cache, tmp, Assembler::LSL, 2 + LogBytesPerWord);  // construct pointer to cache entry
 253 }
 254 
// Load the MethodCounters of 'method' into 'mcs', allocating them via a
// VM call on first use.  If allocation fails (OutOfMemory), 'mcs' is
// zero and control branches to 'skip'.
  255 void InterpreterMacroAssembler::get_method_counters(Register method,
  256                                                     Register mcs, Label& skip) {
  257   Label has_counters;
  258   ldr(mcs, Address(method, Method::method_counters_offset()));
  259   cbnz(mcs, has_counters);  // fast path: counters already allocated
  260   call_VM(noreg, CAST_FROM_FN_PTR(address,
  261           InterpreterRuntime::build_method_counters), method);
  262   ldr(mcs, Address(method, Method::method_counters_offset()));  // reload after VM call
  263   cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
  264   bind(has_counters);
  265 }
 266 































































  267 // Load object from cpool->resolved_references(index)
// On return 'result' holds the resolved reference oop.
// NOTE(review): 'index' is clobbered — it is rebased by the objArray
// header (in heap-oop-sized elements) before being used as the scaled
// array index below.
  268 void InterpreterMacroAssembler::load_resolved_reference_at_index(
  269                                            Register result, Register index, Register tmp) {
  270   assert_different_registers(result, index);
  271 
  272   get_constant_pool(result);
  273   // load pointer for resolved_references[] objArray
  274   ldr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
  275   ldr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
  276   resolve_oop_handle(result, tmp);
  277   // Add in the index
  278   add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  279   load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)));
  280 }
 281 
 282 void InterpreterMacroAssembler::load_resolved_klass_at_offset(
 283                              Register cpool, Register index, Register klass, Register temp) {
 284   add(temp, cpool, index, LSL, LogBytesPerWord);
 285   ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
 286   ldr(klass, Address(cpool,  ConstantPool::resolved_klasses_offset_in_bytes())); // klass = cpool->_resolved_klasses

 293                                                               Register cache) {
 294   const int method_offset = in_bytes(
 295     ConstantPoolCache::base_offset() +
 296       ((byte_no == TemplateTable::f2_byte)
 297        ? ConstantPoolCacheEntry::f2_offset()
 298        : ConstantPoolCacheEntry::f1_offset()));
 299 
 300   ldr(method, Address(cache, method_offset)); // get f1 Method*
 301 }
 302 
 303 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
 304 // subtype of super_klass.
 305 //
 306 // Args:
 307 //      r0: superklass
 308 //      Rsub_klass: subklass
 309 //
 310 // Kills:
 311 //      r2, r5
// Branches to ok_is_subtype on success; on failure records the failed
// typecheck in the MDO and falls through.  Register contract (r0 in,
// r2/r5 killed) is documented in the comment above.
  312 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
  313                                                   Label& ok_is_subtype) {

  314   assert(Rsub_klass != r0, "r0 holds superklass");
  315   assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
  316   assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");
  317 
  318   // Profile the not-null value's klass.
  319   profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5


  320 
  321   // Do the check.
  322   check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
  323 
  324   // Profile the failure of the check.
  325   profile_typecheck_failed(r2); // blows r2


  326 }
 327 
 328 // Java Expression Stack
 329 
// Pop a pointer/oop (one word) off the Java expression stack into r.
  330 void InterpreterMacroAssembler::pop_ptr(Register r) {
  331   ldr(r, post(esp, wordSize));  // load, then post-increment esp by one word
  332 }
 333 
// Pop a 32-bit int off the Java expression stack into r.
  334 void InterpreterMacroAssembler::pop_i(Register r) {
  335   ldrw(r, post(esp, wordSize));  // 32-bit load, but a full word slot is consumed
  336 }
 337 
// Pop a long off the Java expression stack into r.  A long occupies two
// stack elements, so esp advances by two slots.
  338 void InterpreterMacroAssembler::pop_l(Register r) {
  339   ldr(r, post(esp, 2 * Interpreter::stackElementSize));
  340 }
 341 
 342 void InterpreterMacroAssembler::push_ptr(Register r) {
 343   str(r, pre(esp, -wordSize));
 344  }
 345 

 666 
 667     add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
 668     bind(entry);
 669     cmp(c_rarg1, r19); // check if bottom reached
 670     br(Assembler::NE, loop); // if not at bottom then check this entry
 671   }
 672 
 673   bind(no_unlock);
 674 
 675   // jvmti support
 676   if (notify_jvmdi) {
 677     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
 678   } else {
 679     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
 680   }
 681 
 682   // remove activation
 683   // get sender esp
 684   ldr(rscratch2,
 685       Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));

 686   if (StackReservedPages > 0) {
 687     // testing if reserved zone needs to be re-enabled
 688     Label no_reserved_zone_enabling;
 689 
 690     // look for an overflow into the stack reserved zone, i.e.
 691     // interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
 692     ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
 693     cmp(rscratch2, rscratch1);
 694     br(Assembler::LS, no_reserved_zone_enabling);
 695 
 696     call_VM_leaf(
 697       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
 698     call_VM(noreg, CAST_FROM_FN_PTR(address,
 699                    InterpreterRuntime::throw_delayed_StackOverflowError));
 700     should_not_reach_here();
 701 
 702     bind(no_reserved_zone_enabling);
 703   }
 704 

































 705   // restore sender esp
 706   mov(esp, rscratch2);
 707   // remove frame anchor
 708   leave();
 709   // If we're returning to interpreted code we will shortly be
 710   // adjusting SP to allow some space for ESP.  If we're returning to
 711   // compiled code the saved sender SP was saved in sender_sp, so this
 712   // restores it.
 713   andr(sp, esp, -16);
 714 }
 715 
 716 // Lock object
 717 //
 718 // Args:
 719 //      c_rarg1: BasicObjectLock to be used for locking
 720 //
 721 // Kills:
 722 //      r0
 723 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
 724 //      rscratch1, rscratch2 (scratch regs)

 739     const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
 740     const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
 741     const int mark_offset = lock_offset +
 742                             BasicLock::displaced_header_offset_in_bytes();
 743 
 744     Label slow_case;
 745 
 746     // Load object pointer into obj_reg %c_rarg3
 747     ldr(obj_reg, Address(lock_reg, obj_offset));
 748 
 749     if (DiagnoseSyncOnValueBasedClasses != 0) {
 750       load_klass(tmp, obj_reg);
 751       ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 752       tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 753       br(Assembler::NE, slow_case);
 754     }
 755 
 756     // Load (object->mark() | 1) into swap_reg
 757     ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 758     orr(swap_reg, rscratch1, 1);




 759 
 760     // Save (object->mark() | 1) into BasicLock's displaced header
 761     str(swap_reg, Address(lock_reg, mark_offset));
 762 
 763     assert(lock_offset == 0,
 764            "displached header must be first word in BasicObjectLock");
 765 
 766     Label fail;
 767     cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
 768 
 769     // Fast check for recursive lock.
 770     //
 771     // Can apply the optimization only if this is a stack lock
 772     // allocated in this thread. For efficiency, we can focus on
 773     // recently allocated stack locks (instead of reading the stack
 774     // base and checking whether 'mark' points inside the current
 775     // thread stack):
 776     //  1) (mark & 7) == 0, and
 777     //  2) sp <= mark < mark + os::pagesize()
 778     //

1076     Address data(mdp, in_bytes(JumpData::taken_offset()));
1077     ldr(bumped_count, data);
1078     assert(DataLayout::counter_increment == 1,
1079             "flow-free idiom only works with 1");
1080     // Intel does this to catch overflow
1081     // addptr(bumped_count, DataLayout::counter_increment);
1082     // sbbptr(bumped_count, 0);
1083     // so we do this
1084     adds(bumped_count, bumped_count, DataLayout::counter_increment);
1085     Label L;
1086     br(Assembler::CS, L);       // skip store if counter overflow
1087     str(bumped_count, data);
1088     bind(L);
1089     // The method data pointer needs to be updated to reflect the new target.
1090     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1091     bind(profile_continue);
1092   }
1093 }
1094 
1095 
// Profile an untaken conditional branch: bump the BranchData not-taken
// counter in the MDO and advance mdp past this BranchData to the
// profile record of the next bytecode.  No-op unless ProfileInterpreter.
 1096 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
 1097   if (ProfileInterpreter) {
 1098     Label profile_continue;
 1099 
 1100     // If no method data exists, go to profile_continue.
 1101     test_method_data_pointer(mdp, profile_continue);
 1102 
 1103     // We are not taking a branch.  Increment the not taken count.
 1104     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
 1105 
 1106     // The method data pointer needs to be updated to correspond to
 1107     // the next bytecode
 1108     update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
 1109     bind(profile_continue);
 1110   }
 1111 }
1112 
1113 
// Profile a call site: bump the CounterData invocation count in the MDO
// and advance mdp past the CounterData cells.  No-op unless
// ProfileInterpreter is enabled.
 1114 void InterpreterMacroAssembler::profile_call(Register mdp) {
 1115   if (ProfileInterpreter) {
 1116     Label profile_continue;
 1117 
 1118     // If no method data exists, go to profile_continue.
 1119     test_method_data_pointer(mdp, profile_continue);
 1120 
 1121     // We are making a call.  Increment the count.
 1122     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
 1123 
 1124     // The method data pointer needs to be updated to reflect the new target.
 1125     update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
 1126     bind(profile_continue);
 1127   }
 1128 }

1452     // case_array_offset_in_bytes()
1453     movw(reg2, in_bytes(MultiBranchData::per_case_size()));
1454     movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
1455     Assembler::maddw(index, index, reg2, rscratch1);
1456 
1457     // Update the case count
1458     increment_mdp_data_at(mdp,
1459                           index,
1460                           in_bytes(MultiBranchData::relative_count_offset()));
1461 
1462     // The method data pointer needs to be updated.
1463     update_mdp_by_offset(mdp,
1464                          index,
1465                          in_bytes(MultiBranchData::
1466                                   relative_displacement_offset()));
1467 
1468     bind(profile_continue);
1469   }
1470 }
1471 















































































// Verify that 'reg' contains a valid oop, but only when the TOS state
// says an oop is cached there (atos); for any other state no check is
// emitted.
 1472 void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
 1473   if (state == atos) {
 1474     MacroAssembler::verify_oop(reg);
 1475   }
 1476 }
1477 
// Intentionally empty: AArch64 keeps floating-point values in flat
// registers, so there is no x87-style FPU stack state to verify here.
 1478 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }
1479 
1480 
1481 void InterpreterMacroAssembler::notify_method_entry() {
1482   // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1483   // track stack depth.  If it is possible to enter interp_only_mode we add
1484   // the code to check if the event should be sent.
1485   if (JvmtiExport::can_post_interpreter_events()) {
1486     Label L;
1487     ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1488     cbzw(r3, L);
1489     call_VM(noreg, CAST_FROM_FN_PTR(address,
1490                                     InterpreterRuntime::post_method_entry));
1491     bind(L);

1702         profile_obj_type(tmp, mdo_arg_addr);
1703 
1704         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1705         off_to_args += to_add;
1706       }
1707 
1708       if (MethodData::profile_return()) {
1709         ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
1710         sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
1711       }
1712 
1713       add(rscratch1, mdp, off_to_args);
1714       bind(done);
1715       mov(mdp, rscratch1);
1716 
1717       if (MethodData::profile_return()) {
1718         // We're right after the type profile for the last
1719         // argument. tmp is the number of cells left in the
1720         // CallTypeData/VirtualCallTypeData to reach its end. Non null
1721         // if there's a return to profile.
1722         assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
1723         add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
1724       }
1725       str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
1726     } else {
1727       assert(MethodData::profile_return(), "either profile call args or call ret");
1728       update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
1729     }
1730 
1731     // mdp points right after the end of the
1732     // CallTypeData/VirtualCallTypeData, right after the cells for the
1733     // return value type if there's one
1734 
1735     bind(profile_continue);
1736   }
1737 }
1738 
1739 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
1740   assert_different_registers(mdp, ret, tmp, rbcp);
1741   if (ProfileInterpreter && MethodData::profile_return()) {
1742     Label profile_continue, done;

1748 
1749       // If we don't profile all invoke bytecodes we must make sure
1750       // it's a bytecode we indeed profile. We can't go back to the
1751       // begining of the ProfileData we intend to update to check its
1752       // type because we're right after it and we don't known its
1753       // length
1754       Label do_profile;
1755       ldrb(rscratch1, Address(rbcp, 0));
1756       cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
1757       br(Assembler::EQ, do_profile);
1758       cmp(rscratch1, (u1)Bytecodes::_invokehandle);
1759       br(Assembler::EQ, do_profile);
1760       get_method(tmp);
1761       ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset_in_bytes()));
1762       subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1763       br(Assembler::NE, profile_continue);
1764 
1765       bind(do_profile);
1766     }
1767 
1768     Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
1769     mov(tmp, ret);
1770     profile_obj_type(tmp, mdo_ret_addr);
1771 
1772     bind(profile_continue);
1773   }
1774 }
1775 
1776 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
1777   assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
1778   if (ProfileInterpreter && MethodData::profile_parameters()) {
1779     Label profile_continue, done;
1780 
1781     test_method_data_pointer(mdp, profile_continue);
1782 
1783     // Load the offset of the area within the MDO used for
1784     // parameters. If it's negative we're not profiling any parameters
1785     ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
1786     tbnz(tmp1, 31, profile_continue);  // i.e. sign bit set
1787 
1788     // Compute a pointer to the area for parameters from the offset

  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "compiler/compiler_globals.hpp"
  29 #include "gc/shared/barrierSet.hpp"
  30 #include "gc/shared/barrierSetAssembler.hpp"
  31 #include "interp_masm_aarch64.hpp"
  32 #include "interpreter/interpreter.hpp"
  33 #include "interpreter/interpreterRuntime.hpp"
  34 #include "logging/log.hpp"
  35 #include "oops/arrayOop.hpp"
  36 #include "oops/markWord.hpp"
  37 #include "oops/method.hpp"
  38 #include "oops/methodData.hpp"
  39 #include "oops/inlineKlass.hpp"
  40 #include "prims/jvmtiExport.hpp"
  41 #include "prims/jvmtiThreadState.hpp"
  42 #include "runtime/basicLock.hpp"
  43 #include "runtime/frame.inline.hpp"
  44 #include "runtime/safepointMechanism.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 #include "runtime/thread.inline.hpp"
  47 #include "utilities/powerOfTwo.hpp"
  48 
  49 void InterpreterMacroAssembler::narrow(Register result) {
  50 
  51   // Get method->_constMethod->_result_type
  52   ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  53   ldr(rscratch1, Address(rscratch1, Method::const_offset()));
  54   ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
  55 
  56   Label done, notBool, notByte, notChar;
  57 
  58   // common case first
  59   cmpw(rscratch1, T_INT);

 248   // and from word offset to byte offset
 249   assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
 250   ldr(cache, Address(rfp, frame::interpreter_frame_cache_offset * wordSize));
 251   // skip past the header
 252   add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
 253   add(cache, cache, tmp, Assembler::LSL, 2 + LogBytesPerWord);  // construct pointer to cache entry
 254 }
 255 
// Load the MethodCounters of 'method' into 'mcs', allocating them via a
// VM call on first use.  If allocation fails (OutOfMemory), 'mcs' is
// zero and control branches to 'skip'.
  256 void InterpreterMacroAssembler::get_method_counters(Register method,
  257                                                     Register mcs, Label& skip) {
  258   Label has_counters;
  259   ldr(mcs, Address(method, Method::method_counters_offset()));
  260   cbnz(mcs, has_counters);  // fast path: counters already allocated
  261   call_VM(noreg, CAST_FROM_FN_PTR(address,
  262           InterpreterRuntime::build_method_counters), method);
  263   ldr(mcs, Address(method, Method::method_counters_offset()));  // reload after VM call
  264   cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
  265   bind(has_counters);
  266 }
 267 
// Allocate an instance of 'klass' into 'new_obj' via the shared
// MacroAssembler fast path (branches to 'alloc_failed' if the inline
// allocation cannot be done), then — only when DTraceAllocProbes is
// non-zero — report the fast-path allocation to dtrace.  The
// push(atos)/pop(atos) pair preserves the TOS register state across the
// VM leaf call.
  268 void InterpreterMacroAssembler::allocate_instance(Register klass, Register new_obj,
  269                                                   Register t1, Register t2,
  270                                                   bool clear_fields, Label& alloc_failed) {
  271   MacroAssembler::allocate_instance(klass, new_obj, t1, t2, clear_fields, alloc_failed);
  272   {
  273     SkipIfEqual skip_if(this, &DTraceAllocProbes, 0);
  274     // Trigger dtrace event for fastpath
  275     push(atos);
  276     call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), new_obj);
  277     pop(atos);
  278   }
  279 }
 280 
// Read an inlined (flattened) inline-type field out of 'obj'.  Fast
// path: look up the field's inline klass, allocate a fresh buffer
// instance and copy the flattened field contents into it; the resulting
// oop is left in 'obj'.  An empty inline klass yields its pre-existing
// default instance instead.  If buffer allocation fails, fall back to
// the InterpreterRuntime::read_inlined_field VM call.
  281 void InterpreterMacroAssembler::read_inlined_field(Register holder_klass,
  282                                                    Register field_index, Register field_offset,
  283                                                    Register temp, Register obj) {
  284   Label alloc_failed, empty_value, done;
  285   const Register src = field_offset;
  286   const Register alloc_temp = rscratch1;
  287   const Register dst_temp   = temp;
  288   assert_different_registers(obj, holder_klass, field_index, field_offset, dst_temp);
  289 
  290   // Grab the inline field klass
  291   push(holder_klass);  // save: the register is reused as field_klass below
  292   const Register field_klass = holder_klass;
  293   get_inline_type_field_klass(holder_klass, field_index, field_klass);
  294 
  295   // Check for an empty inline-type klass
  296   test_klass_is_empty_inline_type(field_klass, dst_temp, empty_value);
  297 
  298   // allocate buffer
  299   push(obj); // save holder
  300   allocate_instance(field_klass, obj, alloc_temp, dst_temp, false, alloc_failed);
  301 
  302   // Have an oop instance buffer, copy into it
  303   data_for_oop(obj, dst_temp, field_klass);  // dst_temp = payload address of the new buffer
  304   pop(alloc_temp);             // restore holder
  305   lea(src, Address(alloc_temp, field_offset));  // src = address of the flattened field in the holder
  306   // call_VM_leaf, clobbers a few regs, save restore new obj
  307   push(obj);
  308   access_value_copy(IS_DEST_UNINITIALIZED, src, dst_temp, field_klass);
  309   pop(obj);
  310   pop(holder_klass);
  311   b(done);
  312 
  313   bind(empty_value);
  314   get_empty_inline_type_oop(field_klass, dst_temp, obj);
  315   pop(holder_klass);
  316   b(done);
  317 
  318   bind(alloc_failed);
  319   pop(obj);         // unwind the two saves from the fast path
  320   pop(holder_klass);
  321   call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_inlined_field),
  322           obj, field_index, holder_klass);
  323 
  324   bind(done);
  325 
  326   // Ensure the stores to copy the inline field contents are visible
  327   // before any subsequent store that publishes this reference.
  328   membar(Assembler::StoreStore);
  329 }
 330 
  331 // Load object from cpool->resolved_references(index)
// On return 'result' holds the resolved reference oop.
// NOTE(review): 'index' is clobbered — it is rebased by the objArray
// header (in heap-oop-sized elements) before being used as the scaled
// array index below.
  332 void InterpreterMacroAssembler::load_resolved_reference_at_index(
  333                                            Register result, Register index, Register tmp) {
  334   assert_different_registers(result, index);
  335 
  336   get_constant_pool(result);
  337   // load pointer for resolved_references[] objArray
  338   ldr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
  339   ldr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
  340   resolve_oop_handle(result, tmp);
  341   // Add in the index
  342   add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  343   load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)));
  344 }
 345 
 346 void InterpreterMacroAssembler::load_resolved_klass_at_offset(
 347                              Register cpool, Register index, Register klass, Register temp) {
 348   add(temp, cpool, index, LSL, LogBytesPerWord);
 349   ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
 350   ldr(klass, Address(cpool,  ConstantPool::resolved_klasses_offset_in_bytes())); // klass = cpool->_resolved_klasses

 357                                                               Register cache) {
 358   const int method_offset = in_bytes(
 359     ConstantPoolCache::base_offset() +
 360       ((byte_no == TemplateTable::f2_byte)
 361        ? ConstantPoolCacheEntry::f2_offset()
 362        : ConstantPoolCacheEntry::f1_offset()));
 363 
 364   ldr(method, Address(cache, method_offset)); // get f1 Method*
 365 }
 366 
 367 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
 368 // subtype of super_klass.
 369 //
 370 // Args:
 371 //      r0: superklass
 372 //      Rsub_klass: subklass
 373 //
 374 // Kills:
 375 //      r2, r5
// Branches to ok_is_subtype on success; on failure records the failed
// typecheck in the MDO and falls through.  Callers can pass
// profile=false to suppress both MDO updates.  Register contract (r0
// in, r2/r5 killed) is documented in the comment above.
  376 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
  377                                                   Label& ok_is_subtype,
  378                                                   bool profile) {
  379   assert(Rsub_klass != r0, "r0 holds superklass");
  380   assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
  381   assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");
  382 
  383   // Profile the not-null value's klass.
  384   if (profile) {
  385     profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5
  386   }
  387 
  388   // Do the check.
  389   check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
  390 
  391   // Profile the failure of the check.
  392   if (profile) {
  393     profile_typecheck_failed(r2); // blows r2
  394   }
  395 }
 396 
 397 // Java Expression Stack
 398 
// Pop a pointer/oop (one word) off the Java expression stack into r.
  399 void InterpreterMacroAssembler::pop_ptr(Register r) {
  400   ldr(r, post(esp, wordSize));  // load, then post-increment esp by one word
  401 }
 402 
// Pop a 32-bit int off the Java expression stack into r.
  403 void InterpreterMacroAssembler::pop_i(Register r) {
  404   ldrw(r, post(esp, wordSize));  // 32-bit load, but a full word slot is consumed
  405 }
 406 
// Pop a long off the Java expression stack into r.  A long occupies two
// stack elements, so esp advances by two slots.
  407 void InterpreterMacroAssembler::pop_l(Register r) {
  408   ldr(r, post(esp, 2 * Interpreter::stackElementSize));
  409 }
 410 
 411 void InterpreterMacroAssembler::push_ptr(Register r) {
 412   str(r, pre(esp, -wordSize));
 413  }
 414 

 735 
 736     add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
 737     bind(entry);
 738     cmp(c_rarg1, r19); // check if bottom reached
 739     br(Assembler::NE, loop); // if not at bottom then check this entry
 740   }
 741 
 742   bind(no_unlock);
 743 
 744   // jvmti support
 745   if (notify_jvmdi) {
 746     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
 747   } else {
 748     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
 749   }
 750 
 751   // remove activation
 752   // get sender esp
 753   ldr(rscratch2,
 754       Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
 755 
 756   if (StackReservedPages > 0) {
 757     // testing if reserved zone needs to be re-enabled
 758     Label no_reserved_zone_enabling;
 759 
 760     // look for an overflow into the stack reserved zone, i.e.
 761     // interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
 762     ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
 763     cmp(rscratch2, rscratch1);
 764     br(Assembler::LS, no_reserved_zone_enabling);
 765 
 766     call_VM_leaf(
 767       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
 768     call_VM(noreg, CAST_FROM_FN_PTR(address,
 769                    InterpreterRuntime::throw_delayed_StackOverflowError));
 770     should_not_reach_here();
 771 
 772     bind(no_reserved_zone_enabling);
 773   }
 774 
 775 
 776   if (state == atos && InlineTypeReturnedAsFields) {
 777     // Check if we are returning an non-null inline type and load its fields into registers
 778     Label skip;
 779     test_oop_is_not_inline_type(r0, rscratch2, skip);
 780 
 781     // Load fields from a buffered value with an inline class specific handler
 782     load_klass(rscratch1 /*dst*/, r0 /*src*/);
 783     ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 784     ldr(rscratch1, Address(rscratch1, InlineKlass::unpack_handler_offset()));
 785     // Unpack handler can be null if inline type is not scalarizable in returns
 786     cbz(rscratch1, skip);
 787 
 788     blr(rscratch1);
 789 #ifdef ASSERT
 790     if (StressInlineTypeReturnedAsFields) {
 791       // TODO 8284443 Enable this for value class returns (L-type descriptor)
 792       Label skip_stress;
 793       ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
 794       ldr(rscratch1, Address(rscratch1, Method::const_offset()));
 795       ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
 796       cmpw(rscratch1, (u1) T_PRIMITIVE_OBJECT);
 797       br(Assembler::NE, skip_stress);
 798       load_klass(r0, r0);
 799       orr(r0, r0, 1);
 800       bind(skip_stress);
 801     }
 802 #endif
 803     bind(skip);
 804     // Check above kills sender esp in rscratch2. Reload it.
 805     ldr(rscratch2, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
 806   }
 807 
 808   // restore sender esp
 809   mov(esp, rscratch2);
 810   // remove frame anchor
 811   leave();
 812   // If we're returning to interpreted code we will shortly be
 813   // adjusting SP to allow some space for ESP.  If we're returning to
 814   // compiled code the saved sender SP was saved in sender_sp, so this
 815   // restores it.
 816   andr(sp, esp, -16);
 817 }
 818 
 819 // Lock object
 820 //
 821 // Args:
 822 //      c_rarg1: BasicObjectLock to be used for locking
 823 //
 824 // Kills:
 825 //      r0
 826 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
 827 //      rscratch1, rscratch2 (scratch regs)

 842     const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
 843     const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
 844     const int mark_offset = lock_offset +
 845                             BasicLock::displaced_header_offset_in_bytes();
 846 
 847     Label slow_case;
 848 
 849     // Load object pointer into obj_reg %c_rarg3
 850     ldr(obj_reg, Address(lock_reg, obj_offset));
 851 
 852     if (DiagnoseSyncOnValueBasedClasses != 0) {
 853       load_klass(tmp, obj_reg);
 854       ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 855       tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 856       br(Assembler::NE, slow_case);
 857     }
 858 
 859     // Load (object->mark() | 1) into swap_reg
 860     ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 861     orr(swap_reg, rscratch1, 1);
 862     if (EnableValhalla) {
 863       // Mask inline_type bit such that we go to the slow path if object is an inline type
 864       andr(swap_reg, swap_reg, ~((int) markWord::inline_type_bit_in_place));
 865     }
 866 
 867     // Save (object->mark() | 1) into BasicLock's displaced header
 868     str(swap_reg, Address(lock_reg, mark_offset));
 869 
 870     assert(lock_offset == 0,
 871            "displached header must be first word in BasicObjectLock");
 872 
 873     Label fail;
 874     cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
 875 
 876     // Fast check for recursive lock.
 877     //
 878     // Can apply the optimization only if this is a stack lock
 879     // allocated in this thread. For efficiency, we can focus on
 880     // recently allocated stack locks (instead of reading the stack
 881     // base and checking whether 'mark' points inside the current
 882     // thread stack):
 883     //  1) (mark & 7) == 0, and
 884     //  2) sp <= mark < mark + os::pagesize()
 885     //

1183     Address data(mdp, in_bytes(JumpData::taken_offset()));
1184     ldr(bumped_count, data);
1185     assert(DataLayout::counter_increment == 1,
1186             "flow-free idiom only works with 1");
1187     // Intel does this to catch overflow
1188     // addptr(bumped_count, DataLayout::counter_increment);
1189     // sbbptr(bumped_count, 0);
1190     // so we do this
1191     adds(bumped_count, bumped_count, DataLayout::counter_increment);
1192     Label L;
1193     br(Assembler::CS, L);       // skip store if counter overflow
1194     str(bumped_count, data);
1195     bind(L);
1196     // The method data pointer needs to be updated to reflect the new target.
1197     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1198     bind(profile_continue);
1199   }
1200 }
1201 
1202 
1203 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp, bool acmp) {
1204   if (ProfileInterpreter) {
1205     Label profile_continue;
1206 
1207     // If no method data exists, go to profile_continue.
1208     test_method_data_pointer(mdp, profile_continue);
1209 
1210     // We are taking a branch.  Increment the not taken count.
1211     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
1212 
1213     // The method data pointer needs to be updated to correspond to
1214     // the next bytecode
1215     update_mdp_by_constant(mdp, acmp ? in_bytes(ACmpData::acmp_data_size()) : in_bytes(BranchData::branch_data_size()));
1216     bind(profile_continue);
1217   }
1218 }
1219 
1220 
1221 void InterpreterMacroAssembler::profile_call(Register mdp) {
1222   if (ProfileInterpreter) {
1223     Label profile_continue;
1224 
1225     // If no method data exists, go to profile_continue.
1226     test_method_data_pointer(mdp, profile_continue);
1227 
1228     // We are making a call.  Increment the count.
1229     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1230 
1231     // The method data pointer needs to be updated to reflect the new target.
1232     update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1233     bind(profile_continue);
1234   }
1235 }

1559     // case_array_offset_in_bytes()
1560     movw(reg2, in_bytes(MultiBranchData::per_case_size()));
1561     movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
1562     Assembler::maddw(index, index, reg2, rscratch1);
1563 
1564     // Update the case count
1565     increment_mdp_data_at(mdp,
1566                           index,
1567                           in_bytes(MultiBranchData::relative_count_offset()));
1568 
1569     // The method data pointer needs to be updated.
1570     update_mdp_by_offset(mdp,
1571                          index,
1572                          in_bytes(MultiBranchData::
1573                                   relative_displacement_offset()));
1574 
1575     bind(profile_continue);
1576   }
1577 }
1578 
1579 void InterpreterMacroAssembler::profile_array(Register mdp,
1580                                               Register array,
1581                                               Register tmp) {
1582   if (ProfileInterpreter) {
1583     Label profile_continue;
1584 
1585     // If no method data exists, go to profile_continue.
1586     test_method_data_pointer(mdp, profile_continue);
1587 
1588     mov(tmp, array);
1589     profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadStoreData::array_offset())));
1590 
1591     Label not_flat;
1592     test_non_flattened_array_oop(array, tmp, not_flat);
1593 
1594     set_mdp_flag_at(mdp, ArrayLoadStoreData::flat_array_byte_constant());
1595 
1596     bind(not_flat);
1597 
1598     Label not_null_free;
1599     test_non_null_free_array_oop(array, tmp, not_null_free);
1600 
1601     set_mdp_flag_at(mdp, ArrayLoadStoreData::null_free_array_byte_constant());
1602 
1603     bind(not_null_free);
1604 
1605     bind(profile_continue);
1606   }
1607 }
1608 
1609 void InterpreterMacroAssembler::profile_element(Register mdp,
1610                                                 Register element,
1611                                                 Register tmp) {
1612   if (ProfileInterpreter) {
1613     Label profile_continue;
1614 
1615     // If no method data exists, go to profile_continue.
1616     test_method_data_pointer(mdp, profile_continue);
1617 
1618     mov(tmp, element);
1619     profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadStoreData::element_offset())));
1620 
1621     // The method data pointer needs to be updated.
1622     update_mdp_by_constant(mdp, in_bytes(ArrayLoadStoreData::array_load_store_data_size()));
1623 
1624     bind(profile_continue);
1625   }
1626 }
1627 
1628 void InterpreterMacroAssembler::profile_acmp(Register mdp,
1629                                              Register left,
1630                                              Register right,
1631                                              Register tmp) {
1632   if (ProfileInterpreter) {
1633     Label profile_continue;
1634 
1635     // If no method data exists, go to profile_continue.
1636     test_method_data_pointer(mdp, profile_continue);
1637 
1638     mov(tmp, left);
1639     profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::left_offset())));
1640 
1641     Label left_not_inline_type;
1642     test_oop_is_not_inline_type(left, tmp, left_not_inline_type);
1643     set_mdp_flag_at(mdp, ACmpData::left_inline_type_byte_constant());
1644     bind(left_not_inline_type);
1645 
1646     mov(tmp, right);
1647     profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::right_offset())));
1648 
1649     Label right_not_inline_type;
1650     test_oop_is_not_inline_type(right, tmp, right_not_inline_type);
1651     set_mdp_flag_at(mdp, ACmpData::right_inline_type_byte_constant());
1652     bind(right_not_inline_type);
1653 
1654     bind(profile_continue);
1655   }
1656 }
1657 
1658 void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
1659   if (state == atos) {
1660     MacroAssembler::verify_oop(reg);
1661   }
1662 }
1663 
1664 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }
1665 
1666 
1667 void InterpreterMacroAssembler::notify_method_entry() {
1668   // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1669   // track stack depth.  If it is possible to enter interp_only_mode we add
1670   // the code to check if the event should be sent.
1671   if (JvmtiExport::can_post_interpreter_events()) {
1672     Label L;
1673     ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1674     cbzw(r3, L);
1675     call_VM(noreg, CAST_FROM_FN_PTR(address,
1676                                     InterpreterRuntime::post_method_entry));
1677     bind(L);

1888         profile_obj_type(tmp, mdo_arg_addr);
1889 
1890         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1891         off_to_args += to_add;
1892       }
1893 
1894       if (MethodData::profile_return()) {
1895         ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
1896         sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
1897       }
1898 
1899       add(rscratch1, mdp, off_to_args);
1900       bind(done);
1901       mov(mdp, rscratch1);
1902 
1903       if (MethodData::profile_return()) {
1904         // We're right after the type profile for the last
1905         // argument. tmp is the number of cells left in the
1906         // CallTypeData/VirtualCallTypeData to reach its end. Non null
1907         // if there's a return to profile.
1908         assert(SingleTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
1909         add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
1910       }
1911       str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
1912     } else {
1913       assert(MethodData::profile_return(), "either profile call args or call ret");
1914       update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
1915     }
1916 
1917     // mdp points right after the end of the
1918     // CallTypeData/VirtualCallTypeData, right after the cells for the
1919     // return value type if there's one
1920 
1921     bind(profile_continue);
1922   }
1923 }
1924 
1925 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
1926   assert_different_registers(mdp, ret, tmp, rbcp);
1927   if (ProfileInterpreter && MethodData::profile_return()) {
1928     Label profile_continue, done;

1934 
1935       // If we don't profile all invoke bytecodes we must make sure
1936       // it's a bytecode we indeed profile. We can't go back to the
1937       // begining of the ProfileData we intend to update to check its
1938       // type because we're right after it and we don't known its
1939       // length
1940       Label do_profile;
1941       ldrb(rscratch1, Address(rbcp, 0));
1942       cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
1943       br(Assembler::EQ, do_profile);
1944       cmp(rscratch1, (u1)Bytecodes::_invokehandle);
1945       br(Assembler::EQ, do_profile);
1946       get_method(tmp);
1947       ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset_in_bytes()));
1948       subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1949       br(Assembler::NE, profile_continue);
1950 
1951       bind(do_profile);
1952     }
1953 
1954     Address mdo_ret_addr(mdp, -in_bytes(SingleTypeEntry::size()));
1955     mov(tmp, ret);
1956     profile_obj_type(tmp, mdo_ret_addr);
1957 
1958     bind(profile_continue);
1959   }
1960 }
1961 
1962 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
1963   assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
1964   if (ProfileInterpreter && MethodData::profile_parameters()) {
1965     Label profile_continue, done;
1966 
1967     test_method_data_pointer(mdp, profile_continue);
1968 
1969     // Load the offset of the area within the MDO used for
1970     // parameters. If it's negative we're not profiling any parameters
1971     ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
1972     tbnz(tmp1, 31, profile_continue);  // i.e. sign bit set
1973 
1974     // Compute a pointer to the area for parameters from the offset
< prev index next >