< prev index next >

src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp

Print this page

  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "compiler/compiler_globals.hpp"
  29 #include "gc/shared/barrierSet.hpp"
  30 #include "gc/shared/barrierSetAssembler.hpp"
  31 #include "interp_masm_aarch64.hpp"
  32 #include "interpreter/interpreter.hpp"
  33 #include "interpreter/interpreterRuntime.hpp"
  34 #include "logging/log.hpp"
  35 #include "oops/arrayOop.hpp"
  36 #include "oops/markWord.hpp"
  37 #include "oops/method.hpp"
  38 #include "oops/methodData.hpp"

  39 #include "prims/jvmtiExport.hpp"
  40 #include "prims/jvmtiThreadState.hpp"
  41 #include "runtime/basicLock.hpp"
  42 #include "runtime/frame.inline.hpp"
  43 #include "runtime/safepointMechanism.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/thread.inline.hpp"
  46 #include "utilities/powerOfTwo.hpp"
  47 
  48 void InterpreterMacroAssembler::narrow(Register result) {
  49 
  50   // Get method->_constMethod->_result_type
  51   ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  52   ldr(rscratch1, Address(rscratch1, Method::const_offset()));
  53   ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
  54 
  55   Label done, notBool, notByte, notChar;
  56 
  57   // common case first
  58   cmpw(rscratch1, T_INT);

 247   // and from word offset to byte offset
 248   assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
 249   ldr(cache, Address(rfp, frame::interpreter_frame_cache_offset * wordSize));
 250   // skip past the header
 251   add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
 252   add(cache, cache, tmp, Assembler::LSL, 2 + LogBytesPerWord);  // construct pointer to cache entry
 253 }
 254 
// Load method->_method_counters into 'mcs'. MethodCounters are allocated
// lazily: if the field is still null, call into the VM to build them and
// reload. Branches to 'skip' if the VM could not allocate them (OutOfMemory),
// so callers must treat 'mcs' as invalid on that path.
  255 void InterpreterMacroAssembler::get_method_counters(Register method,
  256                                                     Register mcs, Label& skip) {
  257   Label has_counters;
  258   ldr(mcs, Address(method, Method::method_counters_offset()));
  259   cbnz(mcs, has_counters);
        // Counters not present yet -- ask the runtime to create them.
  260   call_VM(noreg, CAST_FROM_FN_PTR(address,
  261           InterpreterRuntime::build_method_counters), method);
  262   ldr(mcs, Address(method, Method::method_counters_offset()));
  263   cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
  264   bind(has_counters);
  265 }
 266 































































  267 // Load object from cpool->resolved_references(index)
        // 'result' receives the resolved oop; 'tmp' is scratch.
        // NOTE: 'index' is clobbered (the array header offset is added into it).
  268 void InterpreterMacroAssembler::load_resolved_reference_at_index(
  269                                            Register result, Register index, Register tmp) {
  270   assert_different_registers(result, index);
  271 
  272   get_constant_pool(result);
  273   // load pointer for resolved_references[] objArray
  274   ldr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
  275   ldr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
        // Unwrap the OopHandle to get the objArray oop.
  276   resolve_oop_handle(result, tmp);
  277   // Add in the index
        // Convert byte header offset to element units so the scaled (uxtw)
        // addressing mode below computes base + (index << LogBytesPerHeapOop).
  278   add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  279   load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)));
  280 }
 281 
 282 void InterpreterMacroAssembler::load_resolved_klass_at_offset(
 283                              Register cpool, Register index, Register klass, Register temp) {
 284   add(temp, cpool, index, LSL, LogBytesPerWord);
 285   ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
 286   ldr(klass, Address(cpool,  ConstantPool::resolved_klasses_offset_in_bytes())); // klass = cpool->_resolved_klasses

 293                                                               Register cache) {
 294   const int method_offset = in_bytes(
 295     ConstantPoolCache::base_offset() +
 296       ((byte_no == TemplateTable::f2_byte)
 297        ? ConstantPoolCacheEntry::f2_offset()
 298        : ConstantPoolCacheEntry::f1_offset()));
 299 
 300   ldr(method, Address(cache, method_offset)); // get f1 Method*
 301 }
 302 
  303 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
  304 // subtype of super_klass.
  305 //
  306 // Args:
  307 //      r0: superklass
  308 //      Rsub_klass: subklass
  309 //
  310 // Kills:
  311 //      r2, r5
        // Falls through (does NOT branch) when the check fails; the failure
        // path is also recorded in the MDO via profile_typecheck_failed.
  312 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
  313                                                   Label& ok_is_subtype) {

  314   assert(Rsub_klass != r0, "r0 holds superklass");
  315   assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
  316   assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");
  317 
  318   // Profile the not-null value's klass.
  319   profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5


  320 
  321   // Do the check.
  322   check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
  323 
  324   // Profile the failure of the check.
  325   profile_typecheck_failed(r2); // blows r2


  326 }
 327 
 328 // Java Expression Stack
 329 
// Pop one machine word (a reference/address) off the Java expression stack;
// esp is post-incremented past the slot.
  330 void InterpreterMacroAssembler::pop_ptr(Register r) {
  331   ldr(r, post(esp, wordSize));
  332 }
 333 
// Pop a 32-bit int off the Java expression stack (one full word slot is
// consumed even though only 32 bits are loaded).
  334 void InterpreterMacroAssembler::pop_i(Register r) {
  335   ldrw(r, post(esp, wordSize));
  336 }
 337 
// Pop a long off the Java expression stack; longs occupy two stack
// elements, so esp advances by two slots.
  338 void InterpreterMacroAssembler::pop_l(Register r) {
  339   ldr(r, post(esp, 2 * Interpreter::stackElementSize));
  340 }
 341 
// Push one machine word (a reference/address) onto the Java expression
// stack; esp is pre-decremented to make room.
  342 void InterpreterMacroAssembler::push_ptr(Register r) {
  343   str(r, pre(esp, -wordSize));
  344  }
 345 

 666 
 667     add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
 668     bind(entry);
 669     cmp(c_rarg1, r19); // check if bottom reached
 670     br(Assembler::NE, loop); // if not at bottom then check this entry
 671   }
 672 
 673   bind(no_unlock);
 674 
 675   // jvmti support
 676   if (notify_jvmdi) {
 677     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
 678   } else {
 679     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
 680   }
 681 
 682   // remove activation
 683   // get sender esp
 684   ldr(rscratch2,
 685       Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));

 686   if (StackReservedPages > 0) {
 687     // testing if reserved zone needs to be re-enabled
 688     Label no_reserved_zone_enabling;
 689 
 690     // look for an overflow into the stack reserved zone, i.e.
 691     // interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
 692     ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
 693     cmp(rscratch2, rscratch1);
 694     br(Assembler::LS, no_reserved_zone_enabling);
 695 
 696     call_VM_leaf(
 697       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
 698     call_VM(noreg, CAST_FROM_FN_PTR(address,
 699                    InterpreterRuntime::throw_delayed_StackOverflowError));
 700     should_not_reach_here();
 701 
 702     bind(no_reserved_zone_enabling);
 703   }
 704 

























 705   // restore sender esp
 706   mov(esp, rscratch2);
 707   // remove frame anchor
 708   leave();
 709   // If we're returning to interpreted code we will shortly be
 710   // adjusting SP to allow some space for ESP.  If we're returning to
 711   // compiled code the saved sender SP was saved in sender_sp, so this
 712   // restores it.
 713   andr(sp, esp, -16);
 714 }
 715 
 716 // Lock object
 717 //
 718 // Args:
 719 //      c_rarg1: BasicObjectLock to be used for locking
 720 //
 721 // Kills:
 722 //      r0
 723 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
 724 //      rscratch1, rscratch2 (scratch regs)

 739     const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
 740     const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
 741     const int mark_offset = lock_offset +
 742                             BasicLock::displaced_header_offset_in_bytes();
 743 
 744     Label slow_case;
 745 
 746     // Load object pointer into obj_reg %c_rarg3
 747     ldr(obj_reg, Address(lock_reg, obj_offset));
 748 
 749     if (DiagnoseSyncOnValueBasedClasses != 0) {
 750       load_klass(tmp, obj_reg);
 751       ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 752       tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 753       br(Assembler::NE, slow_case);
 754     }
 755 
 756     // Load (object->mark() | 1) into swap_reg
 757     ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 758     orr(swap_reg, rscratch1, 1);




 759 
 760     // Save (object->mark() | 1) into BasicLock's displaced header
 761     str(swap_reg, Address(lock_reg, mark_offset));
 762 
 763     assert(lock_offset == 0,
 764            "displached header must be first word in BasicObjectLock");
 765 
 766     Label fail;
 767     cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
 768 
 769     // Fast check for recursive lock.
 770     //
 771     // Can apply the optimization only if this is a stack lock
 772     // allocated in this thread. For efficiency, we can focus on
 773     // recently allocated stack locks (instead of reading the stack
 774     // base and checking whether 'mark' points inside the current
 775     // thread stack):
 776     //  1) (mark & 7) == 0, and
 777     //  2) sp <= mark < mark + os::pagesize()
 778     //

1076     Address data(mdp, in_bytes(JumpData::taken_offset()));
1077     ldr(bumped_count, data);
1078     assert(DataLayout::counter_increment == 1,
1079             "flow-free idiom only works with 1");
1080     // Intel does this to catch overflow
1081     // addptr(bumped_count, DataLayout::counter_increment);
1082     // sbbptr(bumped_count, 0);
1083     // so we do this
1084     adds(bumped_count, bumped_count, DataLayout::counter_increment);
1085     Label L;
1086     br(Assembler::CS, L);       // skip store if counter overflow
1087     str(bumped_count, data);
1088     bind(L);
1089     // The method data pointer needs to be updated to reflect the new target.
1090     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1091     bind(profile_continue);
1092   }
1093 }
1094 
1095 
// Record in the MDO that the current branch bytecode fell through
// (was not taken), then advance mdp past this BranchData cell.
 1096 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
 1097   if (ProfileInterpreter) {
 1098     Label profile_continue;
 1099 
 1100     // If no method data exists, go to profile_continue.
 1101     test_method_data_pointer(mdp, profile_continue);
 1102 
 1103     // We are taking a branch.  Increment the not taken count.
 1104     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
 1105 
 1106     // The method data pointer needs to be updated to correspond to
 1107     // the next bytecode
 1108     update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
 1109     bind(profile_continue);
 1110   }
 1111 }
1112 
1113 
// Bump the invocation counter in this call site's CounterData cell and
// advance mdp past it. No-op unless the interpreter is profiling.
 1114 void InterpreterMacroAssembler::profile_call(Register mdp) {
 1115   if (ProfileInterpreter) {
 1116     Label profile_continue;
 1117 
 1118     // If no method data exists, go to profile_continue.
 1119     test_method_data_pointer(mdp, profile_continue);
 1120 
 1121     // We are making a call.  Increment the count.
 1122     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
 1123 
 1124     // The method data pointer needs to be updated to reflect the new target.
 1125     update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
 1126     bind(profile_continue);
 1127   }
 1128 }

1452     // case_array_offset_in_bytes()
1453     movw(reg2, in_bytes(MultiBranchData::per_case_size()));
1454     movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
1455     Assembler::maddw(index, index, reg2, rscratch1);
1456 
1457     // Update the case count
1458     increment_mdp_data_at(mdp,
1459                           index,
1460                           in_bytes(MultiBranchData::relative_count_offset()));
1461 
1462     // The method data pointer needs to be updated.
1463     update_mdp_by_offset(mdp,
1464                          index,
1465                          in_bytes(MultiBranchData::
1466                                   relative_displacement_offset()));
1467 
1468     bind(profile_continue);
1469   }
1470 }
1471 















































































// Verify 'reg' holds a valid oop, but only when the top-of-stack state is
// atos -- for any other TosState the register contains a non-oop value.
 1472 void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
 1473   if (state == atos) {
 1474     MacroAssembler::verify_oop(reg);
 1475   }
 1476 }
1477 
// Intentionally empty on AArch64: there is no x87-style FPU register stack
// to verify (this hook exists for the shared interpreter code).
 1478 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }
1479 
1480 
1481 void InterpreterMacroAssembler::notify_method_entry() {
1482   // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1483   // track stack depth.  If it is possible to enter interp_only_mode we add
1484   // the code to check if the event should be sent.
1485   if (JvmtiExport::can_post_interpreter_events()) {
1486     Label L;
1487     ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1488     cbzw(r3, L);
1489     call_VM(noreg, CAST_FROM_FN_PTR(address,
1490                                     InterpreterRuntime::post_method_entry));
1491     bind(L);

1702         profile_obj_type(tmp, mdo_arg_addr);
1703 
1704         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1705         off_to_args += to_add;
1706       }
1707 
1708       if (MethodData::profile_return()) {
1709         ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
1710         sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
1711       }
1712 
1713       add(rscratch1, mdp, off_to_args);
1714       bind(done);
1715       mov(mdp, rscratch1);
1716 
1717       if (MethodData::profile_return()) {
1718         // We're right after the type profile for the last
1719         // argument. tmp is the number of cells left in the
1720         // CallTypeData/VirtualCallTypeData to reach its end. Non null
1721         // if there's a return to profile.
1722         assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
1723         add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
1724       }
1725       str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
1726     } else {
1727       assert(MethodData::profile_return(), "either profile call args or call ret");
1728       update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
1729     }
1730 
1731     // mdp points right after the end of the
1732     // CallTypeData/VirtualCallTypeData, right after the cells for the
1733     // return value type if there's one
1734 
1735     bind(profile_continue);
1736   }
1737 }
1738 
1739 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
1740   assert_different_registers(mdp, ret, tmp, rbcp);
1741   if (ProfileInterpreter && MethodData::profile_return()) {
1742     Label profile_continue, done;

1748 
 1749       // If we don't profile all invoke bytecodes we must make sure
 1750       // it's a bytecode we indeed profile. We can't go back to the
 1751       // beginning of the ProfileData we intend to update to check its
 1752       // type because we're right after it and we don't know its
 1753       // length
1754       Label do_profile;
1755       ldrb(rscratch1, Address(rbcp, 0));
1756       cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
1757       br(Assembler::EQ, do_profile);
1758       cmp(rscratch1, (u1)Bytecodes::_invokehandle);
1759       br(Assembler::EQ, do_profile);
1760       get_method(tmp);
1761       ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset_in_bytes()));
1762       subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1763       br(Assembler::NE, profile_continue);
1764 
1765       bind(do_profile);
1766     }
1767 
1768     Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
1769     mov(tmp, ret);
1770     profile_obj_type(tmp, mdo_ret_addr);
1771 
1772     bind(profile_continue);
1773   }
1774 }
1775 
1776 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
1777   assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
1778   if (ProfileInterpreter && MethodData::profile_parameters()) {
1779     Label profile_continue, done;
1780 
1781     test_method_data_pointer(mdp, profile_continue);
1782 
1783     // Load the offset of the area within the MDO used for
1784     // parameters. If it's negative we're not profiling any parameters
1785     ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
1786     tbnz(tmp1, 31, profile_continue);  // i.e. sign bit set
1787 
1788     // Compute a pointer to the area for parameters from the offset

  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "compiler/compiler_globals.hpp"
  29 #include "gc/shared/barrierSet.hpp"
  30 #include "gc/shared/barrierSetAssembler.hpp"
  31 #include "interp_masm_aarch64.hpp"
  32 #include "interpreter/interpreter.hpp"
  33 #include "interpreter/interpreterRuntime.hpp"
  34 #include "logging/log.hpp"
  35 #include "oops/arrayOop.hpp"
  36 #include "oops/markWord.hpp"
  37 #include "oops/method.hpp"
  38 #include "oops/methodData.hpp"
  39 #include "oops/inlineKlass.hpp"
  40 #include "prims/jvmtiExport.hpp"
  41 #include "prims/jvmtiThreadState.hpp"
  42 #include "runtime/basicLock.hpp"
  43 #include "runtime/frame.inline.hpp"
  44 #include "runtime/safepointMechanism.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 #include "runtime/thread.inline.hpp"
  47 #include "utilities/powerOfTwo.hpp"
  48 
  49 void InterpreterMacroAssembler::narrow(Register result) {
  50 
  51   // Get method->_constMethod->_result_type
  52   ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  53   ldr(rscratch1, Address(rscratch1, Method::const_offset()));
  54   ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
  55 
  56   Label done, notBool, notByte, notChar;
  57 
  58   // common case first
  59   cmpw(rscratch1, T_INT);

 248   // and from word offset to byte offset
 249   assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
 250   ldr(cache, Address(rfp, frame::interpreter_frame_cache_offset * wordSize));
 251   // skip past the header
 252   add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
 253   add(cache, cache, tmp, Assembler::LSL, 2 + LogBytesPerWord);  // construct pointer to cache entry
 254 }
 255 
// Load method->_method_counters into 'mcs'. MethodCounters are allocated
// lazily: if the field is still null, call into the VM to build them and
// reload. Branches to 'skip' if the VM could not allocate them (OutOfMemory),
// so callers must treat 'mcs' as invalid on that path.
  256 void InterpreterMacroAssembler::get_method_counters(Register method,
  257                                                     Register mcs, Label& skip) {
  258   Label has_counters;
  259   ldr(mcs, Address(method, Method::method_counters_offset()));
  260   cbnz(mcs, has_counters);
        // Counters not present yet -- ask the runtime to create them.
  261   call_VM(noreg, CAST_FROM_FN_PTR(address,
  262           InterpreterRuntime::build_method_counters), method);
  263   ldr(mcs, Address(method, Method::method_counters_offset()));
  264   cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
  265   bind(has_counters);
  266 }
 267 
// Fast-path allocation of an instance of 'klass' into 'new_obj' (delegates
// to MacroAssembler::allocate_instance; branches to 'alloc_failed' if the
// fast path cannot allocate). When DTraceAllocProbes is enabled, also
// reports the allocation via a leaf VM call, saving/restoring the cached
// TOS oop (atos) around the call since call_VM_leaf clobbers it.
  268 void InterpreterMacroAssembler::allocate_instance(Register klass, Register new_obj,
  269                                                   Register t1, Register t2,
  270                                                   bool clear_fields, Label& alloc_failed) {
  271   MacroAssembler::allocate_instance(klass, new_obj, t1, t2, clear_fields, alloc_failed);
  272   {
  273     SkipIfEqual skip_if(this, &DTraceAllocProbes, 0);
  274     // Trigger dtrace event for fastpath
  275     push(atos);
  276     call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), new_obj);
  277     pop(atos);
  278   }
  279 }
 280 
// Read a flattened ("inlined") value-type field out of the object in 'obj'
// and leave a buffered heap oop for it in 'obj'. Three paths:
//   1) empty inline klass: load the shared pre-allocated empty instance;
//   2) fast path: allocate a fresh buffer and copy the flattened payload
//      into it (field_offset is the byte offset of the payload in the holder);
//   3) fast allocation failed: fall back to InterpreterRuntime::read_inlined_field.
// The holder klass and holder oop are preserved across helper calls by
// pushing/popping them on the machine stack -- the push/pop pairs below must
// stay balanced on every path. 'temp' is scratch; rscratch1 is also used.
  281 void InterpreterMacroAssembler::read_inlined_field(Register holder_klass,
  282                                                    Register field_index, Register field_offset,
  283                                                    Register temp, Register obj) {
  284   Label alloc_failed, empty_value, done;
  285   const Register src = field_offset;
  286   const Register alloc_temp = rscratch1;
  287   const Register dst_temp   = temp;
  288   assert_different_registers(obj, holder_klass, field_index, field_offset, dst_temp);
  289 
  290   // Grab the inline field klass
  291   push(holder_klass);
  292   const Register field_klass = holder_klass;
  293   get_inline_type_field_klass(holder_klass, field_index, field_klass);
  294 
  295   //check for empty value klass
  296   test_klass_is_empty_inline_type(field_klass, dst_temp, empty_value);
  297 
  298   // allocate buffer
  299   push(obj); // save holder
  300   allocate_instance(field_klass, obj, alloc_temp, dst_temp, false, alloc_failed);
  301 
  302   // Have an oop instance buffer, copy into it
        // dst_temp = address of the new buffer's field payload area.
  303   data_for_oop(obj, dst_temp, field_klass);
  304   pop(alloc_temp);             // restore holder
  305   lea(src, Address(alloc_temp, field_offset));
  306   // call_VM_leaf, clobbers a few regs, save restore new obj
  307   push(obj);
  308   access_value_copy(IS_DEST_UNINITIALIZED, src, dst_temp, field_klass);
  309   pop(obj);
  310   pop(holder_klass);
  311   b(done);
  312 
  313   bind(empty_value);
        // Empty inline type: no payload to copy; use the canonical instance.
  314   get_empty_inline_type_oop(field_klass, dst_temp, obj);
  315   pop(holder_klass);
  316   b(done);
  317 
  318   bind(alloc_failed);
        // Rebalance the stack (holder oop + holder klass) and let the runtime
        // perform the read; the result oop is returned in 'obj'.
  319   pop(obj);
  320   pop(holder_klass);
  321   call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_inlined_field),
  322           obj, field_index, holder_klass);
  323 
  324   bind(done);
  325 
  326   // Ensure the stores to copy the inline field contents are visible
  327   // before any subsequent store that publishes this reference.
  328   membar(Assembler::StoreStore);
  329 }
 330 
  331 // Load object from cpool->resolved_references(index)
        // 'result' receives the resolved oop; 'tmp' is scratch.
        // NOTE: 'index' is clobbered (the array header offset is added into it).
  332 void InterpreterMacroAssembler::load_resolved_reference_at_index(
  333                                            Register result, Register index, Register tmp) {
  334   assert_different_registers(result, index);
  335 
  336   get_constant_pool(result);
  337   // load pointer for resolved_references[] objArray
  338   ldr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
  339   ldr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
        // Unwrap the OopHandle to get the objArray oop.
  340   resolve_oop_handle(result, tmp);
  341   // Add in the index
        // Convert byte header offset to element units so the scaled (uxtw)
        // addressing mode below computes base + (index << LogBytesPerHeapOop).
  342   add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  343   load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)));
  344 }
 345 
 346 void InterpreterMacroAssembler::load_resolved_klass_at_offset(
 347                              Register cpool, Register index, Register klass, Register temp) {
 348   add(temp, cpool, index, LSL, LogBytesPerWord);
 349   ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
 350   ldr(klass, Address(cpool,  ConstantPool::resolved_klasses_offset_in_bytes())); // klass = cpool->_resolved_klasses

 357                                                               Register cache) {
 358   const int method_offset = in_bytes(
 359     ConstantPoolCache::base_offset() +
 360       ((byte_no == TemplateTable::f2_byte)
 361        ? ConstantPoolCacheEntry::f2_offset()
 362        : ConstantPoolCacheEntry::f1_offset()));
 363 
 364   ldr(method, Address(cache, method_offset)); // get f1 Method*
 365 }
 366 
  367 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
  368 // subtype of super_klass.
  369 //
  370 // Args:
  371 //      r0: superklass
  372 //      Rsub_klass: subklass
  373 //
  374 // Kills:
  375 //      r2, r5
        // 'profile' == false suppresses MDO type profiling around the check
        // (both the klass profile and the failure profile are skipped).
        // Falls through (does NOT branch) when the check fails.
  376 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
  377                                                   Label& ok_is_subtype,
  378                                                   bool profile) {
  379   assert(Rsub_klass != r0, "r0 holds superklass");
  380   assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
  381   assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");
  382 
  383   // Profile the not-null value's klass.
  384   if (profile) {
  385     profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5
  386   }
  387 
  388   // Do the check.
  389   check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
  390 
  391   // Profile the failure of the check.
  392   if (profile) {
  393     profile_typecheck_failed(r2); // blows r2
  394   }
  395 }
 396 
 397 // Java Expression Stack
 398 
// Pop one machine word (a reference/address) off the Java expression stack;
// esp is post-incremented past the slot.
  399 void InterpreterMacroAssembler::pop_ptr(Register r) {
  400   ldr(r, post(esp, wordSize));
  401 }
 402 
// Pop a 32-bit int off the Java expression stack (one full word slot is
// consumed even though only 32 bits are loaded).
  403 void InterpreterMacroAssembler::pop_i(Register r) {
  404   ldrw(r, post(esp, wordSize));
  405 }
 406 
// Pop a long off the Java expression stack; longs occupy two stack
// elements, so esp advances by two slots.
  407 void InterpreterMacroAssembler::pop_l(Register r) {
  408   ldr(r, post(esp, 2 * Interpreter::stackElementSize));
  409 }
 410 
// Push one machine word (a reference/address) onto the Java expression
// stack; esp is pre-decremented to make room.
  411 void InterpreterMacroAssembler::push_ptr(Register r) {
  412   str(r, pre(esp, -wordSize));
  413  }
 414 

 735 
 736     add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
 737     bind(entry);
 738     cmp(c_rarg1, r19); // check if bottom reached
 739     br(Assembler::NE, loop); // if not at bottom then check this entry
 740   }
 741 
 742   bind(no_unlock);
 743 
 744   // jvmti support
 745   if (notify_jvmdi) {
 746     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
 747   } else {
 748     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
 749   }
 750 
 751   // remove activation
 752   // get sender esp
 753   ldr(rscratch2,
 754       Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
 755 
 756   if (StackReservedPages > 0) {
 757     // testing if reserved zone needs to be re-enabled
 758     Label no_reserved_zone_enabling;
 759 
 760     // look for an overflow into the stack reserved zone, i.e.
 761     // interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
 762     ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
 763     cmp(rscratch2, rscratch1);
 764     br(Assembler::LS, no_reserved_zone_enabling);
 765 
 766     call_VM_leaf(
 767       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
 768     call_VM(noreg, CAST_FROM_FN_PTR(address,
 769                    InterpreterRuntime::throw_delayed_StackOverflowError));
 770     should_not_reach_here();
 771 
 772     bind(no_reserved_zone_enabling);
 773   }
 774 
 775 
 776   if (state == atos && InlineTypeReturnedAsFields) {
 777     Label skip;
 778     // Test if the return type is an inline type
 779     ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
 780     ldr(rscratch1, Address(rscratch1, Method::const_offset()));
 781     ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
 782     cmpw(rscratch1, (u1) T_INLINE_TYPE);
 783     br(Assembler::NE, skip);
 784 
 785     // We are returning an inline type, load its fields into registers
 786     // Load fields from a buffered value with an inline class specific handler
 787 
 788     load_klass(rscratch1 /*dst*/, r0 /*src*/);
 789     ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 790     ldr(rscratch1, Address(rscratch1, InlineKlass::unpack_handler_offset()));
 791     cbz(rscratch1, skip);
 792 
 793     blr(rscratch1);
 794 
 795     // call above kills sender esp in rscratch2. Reload it.
 796     ldr(rscratch2, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
 797     bind(skip);
 798   }
 799 
 800   // restore sender esp
 801   mov(esp, rscratch2);
 802   // remove frame anchor
 803   leave();
 804   // If we're returning to interpreted code we will shortly be
 805   // adjusting SP to allow some space for ESP.  If we're returning to
 806   // compiled code the saved sender SP was saved in sender_sp, so this
 807   // restores it.
 808   andr(sp, esp, -16);
 809 }
 810 
 811 // Lock object
 812 //
 813 // Args:
 814 //      c_rarg1: BasicObjectLock to be used for locking
 815 //
 816 // Kills:
 817 //      r0
 818 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
 819 //      rscratch1, rscratch2 (scratch regs)

 834     const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
 835     const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
 836     const int mark_offset = lock_offset +
 837                             BasicLock::displaced_header_offset_in_bytes();
 838 
 839     Label slow_case;
 840 
 841     // Load object pointer into obj_reg %c_rarg3
 842     ldr(obj_reg, Address(lock_reg, obj_offset));
 843 
 844     if (DiagnoseSyncOnValueBasedClasses != 0) {
 845       load_klass(tmp, obj_reg);
 846       ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
 847       tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
 848       br(Assembler::NE, slow_case);
 849     }
 850 
 851     // Load (object->mark() | 1) into swap_reg
 852     ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 853     orr(swap_reg, rscratch1, 1);
 854     if (EnableValhalla) {
 855       // Mask inline_type bit such that we go to the slow path if object is an inline type
 856       andr(swap_reg, swap_reg, ~((int) markWord::inline_type_bit_in_place));
 857     }
 858 
 859     // Save (object->mark() | 1) into BasicLock's displaced header
 860     str(swap_reg, Address(lock_reg, mark_offset));
 861 
 862     assert(lock_offset == 0,
 863            "displached header must be first word in BasicObjectLock");
 864 
 865     Label fail;
 866     cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
 867 
 868     // Fast check for recursive lock.
 869     //
 870     // Can apply the optimization only if this is a stack lock
 871     // allocated in this thread. For efficiency, we can focus on
 872     // recently allocated stack locks (instead of reading the stack
 873     // base and checking whether 'mark' points inside the current
 874     // thread stack):
 875     //  1) (mark & 7) == 0, and
 876     //  2) sp <= mark < mark + os::pagesize()
 877     //

1175     Address data(mdp, in_bytes(JumpData::taken_offset()));
1176     ldr(bumped_count, data);
1177     assert(DataLayout::counter_increment == 1,
1178             "flow-free idiom only works with 1");
1179     // Intel does this to catch overflow
1180     // addptr(bumped_count, DataLayout::counter_increment);
1181     // sbbptr(bumped_count, 0);
1182     // so we do this
1183     adds(bumped_count, bumped_count, DataLayout::counter_increment);
1184     Label L;
1185     br(Assembler::CS, L);       // skip store if counter overflow
1186     str(bumped_count, data);
1187     bind(L);
1188     // The method data pointer needs to be updated to reflect the new target.
1189     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1190     bind(profile_continue);
1191   }
1192 }
1193 
1194 
// Record in the MDO that the current branch bytecode fell through (was not
// taken), then advance mdp past this profile cell. 'acmp' selects the
// ACmpData layout used for if_acmpeq/if_acmpne profiling, which is a
// different size from plain BranchData -- mdp must be advanced accordingly.
 1195 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp, bool acmp) {
 1196   if (ProfileInterpreter) {
 1197     Label profile_continue;
 1198 
 1199     // If no method data exists, go to profile_continue.
 1200     test_method_data_pointer(mdp, profile_continue);
 1201 
 1202     // We are taking a branch.  Increment the not taken count.
 1203     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
 1204 
 1205     // The method data pointer needs to be updated to correspond to
 1206     // the next bytecode
 1207     update_mdp_by_constant(mdp, acmp ? in_bytes(ACmpData::acmp_data_size()) : in_bytes(BranchData::branch_data_size()));
 1208     bind(profile_continue);
 1209   }
 1210 }
1211 
1212 
1213 void InterpreterMacroAssembler::profile_call(Register mdp) {
1214   if (ProfileInterpreter) {
1215     Label profile_continue;
1216 
1217     // If no method data exists, go to profile_continue.
1218     test_method_data_pointer(mdp, profile_continue);
1219 
1220     // We are making a call.  Increment the count.
1221     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1222 
1223     // The method data pointer needs to be updated to reflect the new target.
1224     update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1225     bind(profile_continue);
1226   }
1227 }

1551     // case_array_offset_in_bytes()
1552     movw(reg2, in_bytes(MultiBranchData::per_case_size()));
1553     movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
1554     Assembler::maddw(index, index, reg2, rscratch1);
1555 
1556     // Update the case count
1557     increment_mdp_data_at(mdp,
1558                           index,
1559                           in_bytes(MultiBranchData::relative_count_offset()));
1560 
1561     // The method data pointer needs to be updated.
1562     update_mdp_by_offset(mdp,
1563                          index,
1564                          in_bytes(MultiBranchData::
1565                                   relative_displacement_offset()));
1566 
1567     bind(profile_continue);
1568   }
1569 }
1570 
1571 void InterpreterMacroAssembler::profile_array(Register mdp,
1572                                               Register array,
1573                                               Register tmp) {
1574   if (ProfileInterpreter) {
1575     Label profile_continue;
1576 
1577     // If no method data exists, go to profile_continue.
1578     test_method_data_pointer(mdp, profile_continue);
1579 
1580     mov(tmp, array);
1581     profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadStoreData::array_offset())));
1582 
1583     Label not_flat;
1584     test_non_flattened_array_oop(array, tmp, not_flat);
1585 
1586     set_mdp_flag_at(mdp, ArrayLoadStoreData::flat_array_byte_constant());
1587 
1588     bind(not_flat);
1589 
1590     Label not_null_free;
1591     test_non_null_free_array_oop(array, tmp, not_null_free);
1592 
1593     set_mdp_flag_at(mdp, ArrayLoadStoreData::null_free_array_byte_constant());
1594 
1595     bind(not_null_free);
1596 
1597     bind(profile_continue);
1598   }
1599 }
1600 
1601 void InterpreterMacroAssembler::profile_element(Register mdp,
1602                                                 Register element,
1603                                                 Register tmp) {
1604   if (ProfileInterpreter) {
1605     Label profile_continue;
1606 
1607     // If no method data exists, go to profile_continue.
1608     test_method_data_pointer(mdp, profile_continue);
1609 
1610     mov(tmp, element);
1611     profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadStoreData::element_offset())));
1612 
1613     // The method data pointer needs to be updated.
1614     update_mdp_by_constant(mdp, in_bytes(ArrayLoadStoreData::array_load_store_data_size()));
1615 
1616     bind(profile_continue);
1617   }
1618 }
1619 
1620 void InterpreterMacroAssembler::profile_acmp(Register mdp,
1621                                              Register left,
1622                                              Register right,
1623                                              Register tmp) {
1624   if (ProfileInterpreter) {
1625     Label profile_continue;
1626 
1627     // If no method data exists, go to profile_continue.
1628     test_method_data_pointer(mdp, profile_continue);
1629 
1630     mov(tmp, left);
1631     profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::left_offset())));
1632 
1633     Label left_not_inline_type;
1634     test_oop_is_not_inline_type(left, tmp, left_not_inline_type);
1635     set_mdp_flag_at(mdp, ACmpData::left_inline_type_byte_constant());
1636     bind(left_not_inline_type);
1637 
1638     mov(tmp, right);
1639     profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::right_offset())));
1640 
1641     Label right_not_inline_type;
1642     test_oop_is_not_inline_type(right, tmp, right_not_inline_type);
1643     set_mdp_flag_at(mdp, ACmpData::right_inline_type_byte_constant());
1644     bind(right_not_inline_type);
1645 
1646     bind(profile_continue);
1647   }
1648 }
1649 
1650 void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
1651   if (state == atos) {
1652     MacroAssembler::verify_oop(reg);
1653   }
1654 }
1655 
1656 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }
1657 
1658 
1659 void InterpreterMacroAssembler::notify_method_entry() {
1660   // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1661   // track stack depth.  If it is possible to enter interp_only_mode we add
1662   // the code to check if the event should be sent.
1663   if (JvmtiExport::can_post_interpreter_events()) {
1664     Label L;
1665     ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1666     cbzw(r3, L);
1667     call_VM(noreg, CAST_FROM_FN_PTR(address,
1668                                     InterpreterRuntime::post_method_entry));
1669     bind(L);

1880         profile_obj_type(tmp, mdo_arg_addr);
1881 
1882         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1883         off_to_args += to_add;
1884       }
1885 
1886       if (MethodData::profile_return()) {
1887         ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
1888         sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
1889       }
1890 
1891       add(rscratch1, mdp, off_to_args);
1892       bind(done);
1893       mov(mdp, rscratch1);
1894 
1895       if (MethodData::profile_return()) {
1896         // We're right after the type profile for the last
1897         // argument. tmp is the number of cells left in the
1898         // CallTypeData/VirtualCallTypeData to reach its end. Non null
1899         // if there's a return to profile.
1900         assert(SingleTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
1901         add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
1902       }
1903       str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
1904     } else {
1905       assert(MethodData::profile_return(), "either profile call args or call ret");
1906       update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
1907     }
1908 
1909     // mdp points right after the end of the
1910     // CallTypeData/VirtualCallTypeData, right after the cells for the
1911     // return value type if there's one
1912 
1913     bind(profile_continue);
1914   }
1915 }
1916 
1917 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
1918   assert_different_registers(mdp, ret, tmp, rbcp);
1919   if (ProfileInterpreter && MethodData::profile_return()) {
1920     Label profile_continue, done;

1926 
1927       // If we don't profile all invoke bytecodes we must make sure
1928       // it's a bytecode we indeed profile. We can't go back to the
1929       // begining of the ProfileData we intend to update to check its
1930       // type because we're right after it and we don't known its
1931       // length
1932       Label do_profile;
1933       ldrb(rscratch1, Address(rbcp, 0));
1934       cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
1935       br(Assembler::EQ, do_profile);
1936       cmp(rscratch1, (u1)Bytecodes::_invokehandle);
1937       br(Assembler::EQ, do_profile);
1938       get_method(tmp);
1939       ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset_in_bytes()));
1940       subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1941       br(Assembler::NE, profile_continue);
1942 
1943       bind(do_profile);
1944     }
1945 
1946     Address mdo_ret_addr(mdp, -in_bytes(SingleTypeEntry::size()));
1947     mov(tmp, ret);
1948     profile_obj_type(tmp, mdo_ret_addr);
1949 
1950     bind(profile_continue);
1951   }
1952 }
1953 
1954 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
1955   assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
1956   if (ProfileInterpreter && MethodData::profile_parameters()) {
1957     Label profile_continue, done;
1958 
1959     test_method_data_pointer(mdp, profile_continue);
1960 
1961     // Load the offset of the area within the MDO used for
1962     // parameters. If it's negative we're not profiling any parameters
1963     ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
1964     tbnz(tmp1, 31, profile_continue);  // i.e. sign bit set
1965 
1966     // Compute a pointer to the area for parameters from the offset
< prev index next >