 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/compiler_globals.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interp_masm_aarch64.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "logging/log.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "oops/method.hpp"
#include "oops/methodData.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/powerOfTwo.hpp"

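// Truncate the 32-bit int result in `result` to the method's declared return
// type: booleans are masked to their low bit, bytes and shorts are
// sign-extended, chars are zero-extended; plain ints fall through unchanged.
// (The type dispatch itself lies in the part of the function elided below.)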
void InterpreterMacroAssembler::narrow(Register result) {

  // Get method->_constMethod->_result_type
  ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  ldr(rscratch1, Address(rscratch1, Method::const_offset()));
  ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));

  Label done, notBool, notByte, notChar;

  // common case first

// ...

  // and from word offset to byte offset
  assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
  ldr(cache, Address(rfp, frame::interpreter_frame_cache_offset * wordSize));
  // skip past the header
  add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
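  // The assert above guarantees sizeof(ConstantPoolCacheEntry) == 4 machine
  // words, so the entry address is
  //   _cache + base_offset() + (index << (2 + LogBytesPerWord))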
  add(cache, cache, tmp, Assembler::LSL, 2 + LogBytesPerWord); // construct pointer to cache entry
}

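// Look up the MethodCounters for `method`, allocating them lazily in the
// runtime on first use; branches to `skip` (with mcs == 0) if the allocation
// fails with OutOfMemory.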
void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbnz(mcs, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}

// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                        Register result, Register index, Register tmp) {
  assert_different_registers(result, index);

  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  ldr(result, Address(result, ConstantPool::cache_offset()));
  ldr(result, Address(result, ConstantPoolCache::resolved_references_offset()));
  resolve_oop_handle(result, tmp, rscratch2);
  // Add in the index: the array base offset is converted to units of heap
  // oops so that it can be folded into the scaled-index addressing mode below.
  add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)), tmp, rscratch2);
}

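// Resolve a Klass* from the constant pool: the cpool entry at `index` holds a
// u2 index into the _resolved_klasses array, which is then dereferenced.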
void InterpreterMacroAssembler::load_resolved_klass_at_offset(
                                        Register cpool, Register index, Register klass, Register temp) {
  add(temp, cpool, index, LSL, LogBytesPerWord);
  ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
  ldr(klass, Address(cpool, ConstantPool::resolved_klasses_offset())); // klass = cpool->_resolved_klasses
  add(klass, klass, temp, LSL, LogBytesPerWord);
  ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
}

void InterpreterMacroAssembler::load_resolved_method_at_index(int byte_no,
                                                              Register method,
                                                              Register cache) {
  const int method_offset = in_bytes(
    ConstantPoolCache::base_offset() +
      ((byte_no == TemplateTable::f2_byte)
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()));

  ldr(method, Address(cache, method_offset)); // get f1 Method*
}

// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
// Args:
//      r0: superklass
//      Rsub_klass: subklass
//
// Kills:
//      r2, r5
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype) {
  assert(Rsub_klass != r0, "r0 holds superklass");
  assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
  assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");

  // Profile the not-null value's klass.
  profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5

  // Do the check.
  check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2

  // Profile the failure of the check.
  profile_typecheck_failed(r2); // blows r2
}

// Java Expression Stack

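// esp points at the top-of-stack word and the stack grows toward lower
// addresses; each stack element occupies one machine word, so longs and
// doubles take two slots but move through a single 64-bit access (see pop_l).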
void InterpreterMacroAssembler::pop_ptr(Register r) {
  ldr(r, post(esp, wordSize));
}

void InterpreterMacroAssembler::pop_i(Register r) {
  ldrw(r, post(esp, wordSize));
}

void InterpreterMacroAssembler::pop_l(Register r) {
  ldr(r, post(esp, 2 * Interpreter::stackElementSize));
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  str(r, pre(esp, -wordSize));
}

// ...

    add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
    bind(entry);
    cmp(c_rarg1, r19); // check if bottom reached
    br(Assembler::NE, loop); // if not at bottom then check this entry
  }

  bind(no_unlock);

  // jvmti support
  if (notify_jvmdi) {
    notify_method_exit(state, NotifyJVMTI);     // preserve TOSCA
  } else {
    notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
  }

  // remove activation
  // get sender esp
  ldr(rscratch2,
      Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
  if (StackReservedPages > 0) {
    // testing if reserved zone needs to be re-enabled
    Label no_reserved_zone_enabling;

    // check if already enabled - if so no re-enabling needed
    assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
    ldrw(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
    cmpw(rscratch1, (u1)StackOverflow::stack_guard_enabled);
    br(Assembler::EQ, no_reserved_zone_enabling);
    // check for an overflow into the stack reserved zone: the stack grows
    // down, so the zone was entered if the frame being removed extends past
    // the activation point, i.e.
    // interpreter_frame_sender_sp > JavaThread::reserved_stack_activation
    ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
    cmp(rscratch2, rscratch1);
    br(Assembler::LS, no_reserved_zone_enabling);

    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_delayed_StackOverflowError));
    should_not_reach_here();

    bind(no_reserved_zone_enabling);
  }

  // restore sender esp
  mov(esp, rscratch2);
  // remove frame anchor
  leave();
  // If we're returning to interpreted code we will shortly be
  // adjusting SP to allow some space for ESP. If we're returning to
  // compiled code the saved sender SP was saved in sender_sp, so this
  // restores it.
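  // Align sp down to a 16-byte boundary, as the AArch64 ABI requires at calls.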
  andr(sp, esp, -16);
}

// Lock object
//
// Args:
//      c_rarg1: BasicObjectLock to be used for locking
//
// Kills:
//      r0
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
//      rscratch1, rscratch2 (scratch regs)

// ...

    Label slow_case;

    // Load object pointer into obj_reg %c_rarg3
    ldr(obj_reg, Address(lock_reg, obj_offset));

    if (DiagnoseSyncOnValueBasedClasses != 0) {
      load_klass(tmp, obj_reg);
      ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
      tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
      br(Assembler::NE, slow_case);
    }

    if (LockingMode == LM_LIGHTWEIGHT) {
      ldr(tmp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      fast_lock(obj_reg, tmp, rscratch1, rscratch2, slow_case);
      b(count);
    } else if (LockingMode == LM_LEGACY) {
      // Load (object->mark() | 1) into swap_reg
      ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      orr(swap_reg, rscratch1, 1);

      // Save (object->mark() | 1) into BasicLock's displaced header
      str(swap_reg, Address(lock_reg, mark_offset));

      assert(lock_offset == 0,
             "displaced header must be first word in BasicObjectLock");

      Label fail;
      cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);

      // Fast check for recursive lock.
      //
      // Can apply the optimization only if this is a stack lock
      // allocated in this thread. For efficiency, we can focus on
      // recently allocated stack locks (instead of reading the stack
      // base and checking whether 'mark' points inside the current
      // thread stack):
      //  1) (mark & 7) == 0, and
      //  2) sp <= mark < mark + os::pagesize()
      //

// ...

    Address data(mdp, in_bytes(JumpData::taken_offset()));
    ldr(bumped_count, data);
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    // Intel does this to catch overflow
    // addptr(bumped_count, DataLayout::counter_increment);
    // sbbptr(bumped_count, 0);
    // so we do this
    adds(bumped_count, bumped_count, DataLayout::counter_increment);
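    // adds sets the carry flag when the unsigned add wraps to zero, so the
    // CS branch below skips the store and the counter saturates at its
    // maximum value instead of overflowing.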
    Label L;
    br(Assembler::CS, L); // skip store if counter overflow
    str(bumped_count, data);
    bind(L);
    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The branch was not taken. Increment the not taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to
    // the next bytecode
    update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}

// ...

    // case_array_offset_in_bytes()
    movw(reg2, in_bytes(MultiBranchData::per_case_size()));
    movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
    Assembler::maddw(index, index, reg2, rscratch1);
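    // maddw computes index = index * per_case_size() + case_array_offset(),
    // i.e. the byte offset of this case's cells within the MultiBranchData.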

    // Update the case count
    increment_mdp_data_at(mdp,
                          index,
                          in_bytes(MultiBranchData::relative_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         index,
                         in_bytes(MultiBranchData::relative_displacement_offset()));

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
  if (state == atos) {
    MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
  }
}

void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }


void InterpreterMacroAssembler::notify_method_entry() {
  // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
  // track stack depth. If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
    cbzw(r3, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::post_method_entry));
    bind(L);

// ...

        profile_obj_type(tmp, mdo_arg_addr);

        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
        off_to_args += to_add;
      }

      if (MethodData::profile_return()) {
        ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
        sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
      }

      add(rscratch1, mdp, off_to_args);
      bind(done);
      mov(mdp, rscratch1);

      if (MethodData::profile_return()) {
        // We're right after the type profile for the last
        // argument. tmp is the number of cells left in the
        // CallTypeData/VirtualCallTypeData to reach its end. Non-null
        // if there's a return to profile.
        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
        add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
      }
      str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
    } else {
      assert(MethodData::profile_return(), "either profile call args or call ret");
      update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
    }

    // mdp points right after the end of the
    // CallTypeData/VirtualCallTypeData, right after the cells for the
    // return value type if there's one

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
  assert_different_registers(mdp, ret, tmp, rbcp);
  if (ProfileInterpreter && MethodData::profile_return()) {
    Label profile_continue, done;

    test_method_data_pointer(mdp, profile_continue);

    if (MethodData::profile_return_jsr292_only()) {
      assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");

      // If we don't profile all invoke bytecodes we must make sure
      // it's a bytecode we indeed profile. We can't go back to the
      // beginning of the ProfileData we intend to update to check its
      // type because we're right after it and we don't know its
      // length
      Label do_profile;
      ldrb(rscratch1, Address(rbcp, 0));
      cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
      br(Assembler::EQ, do_profile);
      cmp(rscratch1, (u1)Bytecodes::_invokehandle);
      br(Assembler::EQ, do_profile);
      get_method(tmp);
      ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset()));
      subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
      br(Assembler::NE, profile_continue);

      bind(do_profile);
    }

    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
    mov(tmp, ret);
    profile_obj_type(tmp, mdo_ret_addr);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
  assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
  if (ProfileInterpreter && MethodData::profile_parameters()) {
    Label profile_continue, done;

    test_method_data_pointer(mdp, profile_continue);

    // Load the offset of the area within the MDO used for
    // parameters. If it's negative we're not profiling any parameters
    ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
    tbnz(tmp1, 31, profile_continue);  // i.e. sign bit set

    // Compute a pointer to the area for parameters from the offset

// ...
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/compiler_globals.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interp_masm_aarch64.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "logging/log.hpp"
#include "oops/arrayOop.hpp"
#include "oops/constMethodFlags.hpp"
#include "oops/inlineKlass.hpp"
#include "oops/markWord.hpp"
#include "oops/method.hpp"
#include "oops/methodData.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/powerOfTwo.hpp"

void InterpreterMacroAssembler::narrow(Register result) {

  // Get method->_constMethod->_result_type
  ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  ldr(rscratch1, Address(rscratch1, Method::const_offset()));
  ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));

  Label done, notBool, notByte, notChar;

  // common case first

// ...

  // and from word offset to byte offset
  assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
  ldr(cache, Address(rfp, frame::interpreter_frame_cache_offset * wordSize));
  // skip past the header
  add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
  add(cache, cache, tmp, Assembler::LSL, 2 + LogBytesPerWord); // construct pointer to cache entry
}

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbnz(mcs, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}

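// Interpreter-side wrapper around MacroAssembler::allocate_instance that also
// fires the dtrace object-allocation probe when the fast-path allocation
// succeeds (guarded at runtime by DTraceAllocProbes).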
void InterpreterMacroAssembler::allocate_instance(Register klass, Register new_obj,
                                                  Register t1, Register t2,
                                                  bool clear_fields, Label& alloc_failed) {
  MacroAssembler::allocate_instance(klass, new_obj, t1, t2, clear_fields, alloc_failed);
  {
    SkipIfEqual skip_if(this, &DTraceAllocProbes, 0);
    // Trigger dtrace event for fastpath
    push(atos);
    call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), new_obj);
    pop(atos);
  }
}

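// Read a flat (inlined) field out of a holder object: allocate a buffer
// instance of the field's inline klass, copy the flattened payload into it,
// and leave the buffered oop in `obj`. Empty inline types are satisfied with
// the pre-allocated default instance; if buffer allocation fails, the whole
// read is redone in the runtime.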
void InterpreterMacroAssembler::read_flat_field(Register holder_klass,
                                                Register field_index, Register field_offset,
                                                Register temp, Register obj) {
  Label alloc_failed, empty_value, done;
  const Register src = field_offset;
  const Register alloc_temp = rscratch1;
  const Register dst_temp = temp;
  assert_different_registers(obj, holder_klass, field_index, field_offset, dst_temp);

  // Grab the inline field klass
  push(holder_klass);
  const Register field_klass = holder_klass;
  get_inline_type_field_klass(holder_klass, field_index, field_klass);

  // check for empty value klass
  test_klass_is_empty_inline_type(field_klass, dst_temp, empty_value);

  // allocate buffer
  push(obj); // save holder
  allocate_instance(field_klass, obj, alloc_temp, dst_temp, false, alloc_failed);

  // Have an oop instance buffer, copy into it
  data_for_oop(obj, dst_temp, field_klass);
  pop(alloc_temp); // restore holder
  lea(src, Address(alloc_temp, field_offset));
  // call_VM_leaf clobbers a few regs; save/restore the new obj
  push(obj);
  access_value_copy(IS_DEST_UNINITIALIZED, src, dst_temp, field_klass);
  pop(obj);
  pop(holder_klass);
  b(done);

  bind(empty_value);
  get_empty_inline_type_oop(field_klass, dst_temp, obj);
  pop(holder_klass);
  b(done);

  bind(alloc_failed);
  pop(obj);
  pop(holder_klass);
  call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flat_field),
          obj, field_index, holder_klass);

  bind(done);

  // Ensure the stores to copy the inline field contents are visible
  // before any subsequent store that publishes this reference.
  membar(Assembler::StoreStore);
}

// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                        Register result, Register index, Register tmp) {
  assert_different_registers(result, index);

  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  ldr(result, Address(result, ConstantPool::cache_offset()));
  ldr(result, Address(result, ConstantPoolCache::resolved_references_offset()));
  resolve_oop_handle(result, tmp, rscratch2);
  // Add in the index: the array base offset is converted to units of heap
  // oops so that it can be folded into the scaled-index addressing mode below.
  add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)), tmp, rscratch2);
}

void InterpreterMacroAssembler::load_resolved_klass_at_offset(
                                        Register cpool, Register index, Register klass, Register temp) {
  add(temp, cpool, index, LSL, LogBytesPerWord);
  ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
  ldr(klass, Address(cpool, ConstantPool::resolved_klasses_offset())); // klass = cpool->_resolved_klasses
  add(klass, klass, temp, LSL, LogBytesPerWord);
  ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
}

void InterpreterMacroAssembler::load_resolved_method_at_index(int byte_no,
                                                              Register method,
                                                              Register cache) {
  const int method_offset = in_bytes(
    ConstantPoolCache::base_offset() +
      ((byte_no == TemplateTable::f2_byte)
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()));

  ldr(method, Address(cache, method_offset)); // get f1 Method*
}

// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
// Args:
//      r0: superklass
//      Rsub_klass: subklass
//
// Kills:
//      r2, r5
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype,
                                                  bool profile) {
  assert(Rsub_klass != r0, "r0 holds superklass");
  assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
  assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");

  // Profile the not-null value's klass.
  if (profile) {
    profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5
  }

  // Do the check.
  check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2

  // Profile the failure of the check.
  if (profile) {
    profile_typecheck_failed(r2); // blows r2
  }
}

// Java Expression Stack

void InterpreterMacroAssembler::pop_ptr(Register r) {
  ldr(r, post(esp, wordSize));
}

void InterpreterMacroAssembler::pop_i(Register r) {
  ldrw(r, post(esp, wordSize));
}

void InterpreterMacroAssembler::pop_l(Register r) {
  ldr(r, post(esp, 2 * Interpreter::stackElementSize));
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  str(r, pre(esp, -wordSize));
}

// ...

    add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
    bind(entry);
    cmp(c_rarg1, r19); // check if bottom reached
    br(Assembler::NE, loop); // if not at bottom then check this entry
  }

  bind(no_unlock);

  // jvmti support
  if (notify_jvmdi) {
    notify_method_exit(state, NotifyJVMTI);     // preserve TOSCA
  } else {
    notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
  }

  // remove activation
  // get sender esp
  ldr(rscratch2,
      Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));

  if (StackReservedPages > 0) {
    // testing if reserved zone needs to be re-enabled
    Label no_reserved_zone_enabling;

    // check if already enabled - if so no re-enabling needed
    assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
    ldrw(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
    cmpw(rscratch1, (u1)StackOverflow::stack_guard_enabled);
    br(Assembler::EQ, no_reserved_zone_enabling);
    // check for an overflow into the stack reserved zone: the stack grows
    // down, so the zone was entered if the frame being removed extends past
    // the activation point, i.e.
    // interpreter_frame_sender_sp > JavaThread::reserved_stack_activation
    ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
    cmp(rscratch2, rscratch1);
    br(Assembler::LS, no_reserved_zone_enabling);

    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_delayed_StackOverflowError));
    should_not_reach_here();

    bind(no_reserved_zone_enabling);
  }

  if (state == atos && InlineTypeReturnedAsFields) {
    // Check if we are returning a non-null inline type and load its fields into registers
    Label skip;
    test_oop_is_not_inline_type(r0, rscratch2, skip);

    // Load fields from a buffered value with an inline class specific handler
    load_klass(rscratch1 /*dst*/, r0 /*src*/);
    ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
    ldr(rscratch1, Address(rscratch1, InlineKlass::unpack_handler_offset()));
    // Unpack handler can be null if inline type is not scalarizable in returns
    cbz(rscratch1, skip);

    blr(rscratch1);
#ifdef ASSERT
    // TODO 8284443 Enable
    if (StressCallingConvention && false) {
      Label skip_stress;
      ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
      ldrw(rscratch1, Address(rscratch1, Method::flags_offset()));
      tstw(rscratch1, ConstMethodFlags::has_scalarized_return_flag());
      br(Assembler::EQ, skip_stress);
      load_klass(r0, r0);
      orr(r0, r0, 1);
      bind(skip_stress);
    }
#endif
    bind(skip);
    // The check above kills the sender esp in rscratch2. Reload it.
    ldr(rscratch2, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
  }

  // restore sender esp
  mov(esp, rscratch2);
  // remove frame anchor
  leave();
  // If we're returning to interpreted code we will shortly be
  // adjusting SP to allow some space for ESP. If we're returning to
  // compiled code the saved sender SP was saved in sender_sp, so this
  // restores it.
  andr(sp, esp, -16);
}

// Lock object
//
// Args:
//      c_rarg1: BasicObjectLock to be used for locking
//
// Kills:
//      r0
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
//      rscratch1, rscratch2 (scratch regs)

// ...

    Label slow_case;

    // Load object pointer into obj_reg %c_rarg3
    ldr(obj_reg, Address(lock_reg, obj_offset));

    if (DiagnoseSyncOnValueBasedClasses != 0) {
      load_klass(tmp, obj_reg);
      ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
      tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
      br(Assembler::NE, slow_case);
    }

    if (LockingMode == LM_LIGHTWEIGHT) {
      ldr(tmp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      fast_lock(obj_reg, tmp, rscratch1, rscratch2, slow_case);
      b(count);
    } else if (LockingMode == LM_LEGACY) {
      // Load (object->mark() | 1) into swap_reg
      ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      orr(swap_reg, rscratch1, 1);
      if (EnableValhalla) {
        // Mask inline_type bit such that we go to the slow path if object is an inline type
        andr(swap_reg, swap_reg, ~((int) markWord::inline_type_bit_in_place));
      }
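      // (An inline type's mark word has the inline_type bit set, so clearing
      // the bit in the expected value guarantees the CAS below fails and
      // locking on an inline type is rejected in the runtime slow path.)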

      // Save (object->mark() | 1) into BasicLock's displaced header
      str(swap_reg, Address(lock_reg, mark_offset));

      assert(lock_offset == 0,
             "displaced header must be first word in BasicObjectLock");

      Label fail;
      cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);

      // Fast check for recursive lock.
      //
      // Can apply the optimization only if this is a stack lock
      // allocated in this thread. For efficiency, we can focus on
      // recently allocated stack locks (instead of reading the stack
      // base and checking whether 'mark' points inside the current
      // thread stack):
      //  1) (mark & 7) == 0, and
      //  2) sp <= mark < mark + os::pagesize()
      //

// ...

    Address data(mdp, in_bytes(JumpData::taken_offset()));
    ldr(bumped_count, data);
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    // Intel does this to catch overflow
    // addptr(bumped_count, DataLayout::counter_increment);
    // sbbptr(bumped_count, 0);
    // so we do this
    adds(bumped_count, bumped_count, DataLayout::counter_increment);
    Label L;
    br(Assembler::CS, L); // skip store if counter overflow
    str(bumped_count, data);
    bind(L);
    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp, bool acmp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The branch was not taken. Increment the not taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to
    // the next bytecode
    update_mdp_by_constant(mdp, acmp ? in_bytes(ACmpData::acmp_data_size()) : in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}


void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}

// ...

    // case_array_offset_in_bytes()
    movw(reg2, in_bytes(MultiBranchData::per_case_size()));
    movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
    Assembler::maddw(index, index, reg2, rscratch1);

    // Update the case count
    increment_mdp_data_at(mdp,
                          index,
                          in_bytes(MultiBranchData::relative_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         index,
                         in_bytes(MultiBranchData::relative_displacement_offset()));

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_array(Register mdp,
                                              Register array,
                                              Register tmp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    mov(tmp, array);
    profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadStoreData::array_offset())));

    Label not_flat;
    test_non_flat_array_oop(array, tmp, not_flat);

    set_mdp_flag_at(mdp, ArrayLoadStoreData::flat_array_byte_constant());

    bind(not_flat);

    Label not_null_free;
    test_non_null_free_array_oop(array, tmp, not_null_free);

    set_mdp_flag_at(mdp, ArrayLoadStoreData::null_free_array_byte_constant());

    bind(not_null_free);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_element(Register mdp,
                                                Register element,
                                                Register tmp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    mov(tmp, element);
    profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadStoreData::element_offset())));

    // The method data pointer needs to be updated.
    update_mdp_by_constant(mdp, in_bytes(ArrayLoadStoreData::array_load_store_data_size()));

    bind(profile_continue);
  }
}

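// Record profiling data for an acmp bytecode: the observed klass of the left
// and right operands, plus a per-side flag noting whether the operand was an
// inline type, so the comparison can later be specialized by the compiler.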
void InterpreterMacroAssembler::profile_acmp(Register mdp,
                                             Register left,
                                             Register right,
                                             Register tmp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    mov(tmp, left);
    profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::left_offset())));

    Label left_not_inline_type;
    test_oop_is_not_inline_type(left, tmp, left_not_inline_type);
    set_mdp_flag_at(mdp, ACmpData::left_inline_type_byte_constant());
    bind(left_not_inline_type);

    mov(tmp, right);
    profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::right_offset())));

    Label right_not_inline_type;
    test_oop_is_not_inline_type(right, tmp, right_not_inline_type);
    set_mdp_flag_at(mdp, ACmpData::right_inline_type_byte_constant());
    bind(right_not_inline_type);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
  if (state == atos) {
    MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
  }
}

void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }


void InterpreterMacroAssembler::notify_method_entry() {
  // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
  // track stack depth. If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
    cbzw(r3, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::post_method_entry));
    bind(L);

// ...

        profile_obj_type(tmp, mdo_arg_addr);

        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
        off_to_args += to_add;
      }

      if (MethodData::profile_return()) {
        ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
        sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
      }

      add(rscratch1, mdp, off_to_args);
      bind(done);
      mov(mdp, rscratch1);

      if (MethodData::profile_return()) {
        // We're right after the type profile for the last
        // argument. tmp is the number of cells left in the
        // CallTypeData/VirtualCallTypeData to reach its end. Non-null
        // if there's a return to profile.
        assert(SingleTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
        add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
      }
      str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
    } else {
      assert(MethodData::profile_return(), "either profile call args or call ret");
      update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
    }

    // mdp points right after the end of the
    // CallTypeData/VirtualCallTypeData, right after the cells for the
    // return value type if there's one

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
  assert_different_registers(mdp, ret, tmp, rbcp);
  if (ProfileInterpreter && MethodData::profile_return()) {
    Label profile_continue, done;

    test_method_data_pointer(mdp, profile_continue);

    if (MethodData::profile_return_jsr292_only()) {
      assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");

      // If we don't profile all invoke bytecodes we must make sure
      // it's a bytecode we indeed profile. We can't go back to the
      // beginning of the ProfileData we intend to update to check its
      // type because we're right after it and we don't know its
      // length
      Label do_profile;
      ldrb(rscratch1, Address(rbcp, 0));
      cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
      br(Assembler::EQ, do_profile);
      cmp(rscratch1, (u1)Bytecodes::_invokehandle);
      br(Assembler::EQ, do_profile);
      get_method(tmp);
      ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset()));
      subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
      br(Assembler::NE, profile_continue);

      bind(do_profile);
    }

    Address mdo_ret_addr(mdp, -in_bytes(SingleTypeEntry::size()));
    mov(tmp, ret);
    profile_obj_type(tmp, mdo_ret_addr);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
  assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
  if (ProfileInterpreter && MethodData::profile_parameters()) {
    Label profile_continue, done;

    test_method_data_pointer(mdp, profile_continue);

    // Load the offset of the area within the MDO used for
    // parameters. If it's negative we're not profiling any parameters
    ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
    tbnz(tmp1, 31, profile_continue);  // i.e. sign bit set

    // Compute a pointer to the area for parameters from the offset

// ...