16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "compiler/compiler_globals.hpp"
29 #include "gc/shared/barrierSet.hpp"
30 #include "gc/shared/barrierSetAssembler.hpp"
31 #include "interp_masm_aarch64.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "interpreter/interpreterRuntime.hpp"
34 #include "logging/log.hpp"
35 #include "oops/arrayOop.hpp"
36 #include "oops/markWord.hpp"
37 #include "oops/method.hpp"
38 #include "oops/methodData.hpp"
39 #include "oops/resolvedFieldEntry.hpp"
40 #include "oops/resolvedIndyEntry.hpp"
41 #include "oops/resolvedMethodEntry.hpp"
42 #include "prims/jvmtiExport.hpp"
43 #include "prims/jvmtiThreadState.hpp"
44 #include "runtime/basicLock.hpp"
45 #include "runtime/frame.inline.hpp"
46 #include "runtime/javaThread.hpp"
47 #include "runtime/safepointMechanism.hpp"
48 #include "runtime/sharedRuntime.hpp"
49 #include "utilities/powerOfTwo.hpp"
50
51 void InterpreterMacroAssembler::narrow(Register result) {
52
53 // Get method->_constMethod->_result_type
54 ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
55 ldr(rscratch1, Address(rscratch1, Method::const_offset()));
56 ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
57
58 Label done, notBool, notByte, notChar;
197 eonw(index, index, zr); // convert to plain index
198 } else if (index_size == sizeof(u1)) {
199 load_unsigned_byte(index, Address(rbcp, bcp_offset));
200 } else {
201 ShouldNotReachHere();
202 }
203 }
204
// Load the MethodCounters of 'method' into 'mcs', allocating them lazily:
// if Method::_method_counters is still null, call into the VM to build them,
// then reload. Branches to 'skip' (with mcs == 0) if the allocation failed
// due to OutOfMemory.
void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbnz(mcs, has_counters);    // fast path: already allocated
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}
216
217 // Load object from cpool->resolved_references(index)
// Load object from cpool->resolved_references(index).
// result: receives the resolved reference oop
// index:  constant-pool cache index; CLOBBERED (biased by the array base)
// tmp:    temporary used by oop-handle resolution and the heap-oop load
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index, Register tmp) {
  assert_different_registers(result, index);

  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  ldr(result, Address(result, ConstantPool::cache_offset()));
  ldr(result, Address(result, ConstantPoolCache::resolved_references_offset()));
  resolve_oop_handle(result, tmp, rscratch2);
  // Bias the index by the objArray element base so a single scaled
  // register-offset addressing mode can be used for the load below.
  add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)), tmp, rscratch2);
}
231
// klass = cpool->resolved_klasses()->at(resolved_klass_index), where the
// resolved_klass_index is the u2 stored in constant-pool slot 'index'
// (slots live immediately after the ConstantPool header).
// 'index' is unchanged; 'temp' is clobbered.
void InterpreterMacroAssembler::load_resolved_klass_at_offset(
                                           Register cpool, Register index, Register klass, Register temp) {
  add(temp, cpool, index, LSL, LogBytesPerWord);
  ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
  ldr(klass, Address(cpool, ConstantPool::resolved_klasses_offset())); // klass = cpool->_resolved_klasses
  add(klass, klass, temp, LSL, LogBytesPerWord);
  ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
}
240
241 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
242 // subtype of super_klass.
243 //
244 // Args:
245 // r0: superklass
246 // Rsub_klass: subklass
247 //
248 // Kills:
249 // r2, r5
// Generate a subtype check: branch to ok_is_subtype if Rsub_klass is a
// subtype of the superklass in r0; fall through otherwise.
// Blows r2 and r5 (see the Args/Kills comment above).
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype) {
  // r0, r2, r5 are pinned by the profiling and subtype-check helpers below.
  assert(Rsub_klass != r0, "r0 holds superklass");
  assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
  assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");

  // Profile the not-null value's klass.
  profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5

  // Do the check.
  check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
}
262
263 // Java Expression Stack
264
// Pop a reference (one stack slot) from the Java expression stack into r.
void InterpreterMacroAssembler::pop_ptr(Register r) {
  ldr(r, post(esp, wordSize)); // post-indexed load bumps esp past the slot
}
268
// Pop an int from the Java expression stack into r (32-bit load; the
// expression stack still advances by a full word-sized slot).
void InterpreterMacroAssembler::pop_i(Register r) {
  ldrw(r, post(esp, wordSize));
}
272
// Pop a long from the Java expression stack into r. Longs occupy two
// interpreter stack slots, so esp advances by two element sizes.
void InterpreterMacroAssembler::pop_l(Register r) {
  ldr(r, post(esp, 2 * Interpreter::stackElementSize));
}
276
277 void InterpreterMacroAssembler::push_ptr(Register r) {
605
606 add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
607 bind(entry);
608 cmp(c_rarg1, r19); // check if bottom reached
609 br(Assembler::NE, loop); // if not at bottom then check this entry
610 }
611
612 bind(no_unlock);
613
614 // jvmti support
615 if (notify_jvmdi) {
616 notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
617 } else {
618 notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
619 }
620
621 // remove activation
622 // get sender esp
623 ldr(rscratch2,
624 Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
625 if (StackReservedPages > 0) {
626 // testing if reserved zone needs to be re-enabled
627 Label no_reserved_zone_enabling;
628
629 // check if already enabled - if so no re-enabling needed
630 assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
631 ldrw(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
632 cmpw(rscratch1, (u1)StackOverflow::stack_guard_enabled);
633 br(Assembler::EQ, no_reserved_zone_enabling);
634
635 // look for an overflow into the stack reserved zone, i.e.
636 // interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
637 ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
638 cmp(rscratch2, rscratch1);
639 br(Assembler::LS, no_reserved_zone_enabling);
640
641 call_VM_leaf(
642 CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
643 call_VM(noreg, CAST_FROM_FN_PTR(address,
644 InterpreterRuntime::throw_delayed_StackOverflowError));
645 should_not_reach_here();
646
647 bind(no_reserved_zone_enabling);
648 }
649
650 // restore sender esp
651 mov(esp, rscratch2);
652 // remove frame anchor
653 leave();
654 // If we're returning to interpreted code we will shortly be
655 // adjusting SP to allow some space for ESP. If we're returning to
656 // compiled code the saved sender SP was saved in sender_sp, so this
657 // restores it.
658 andr(sp, esp, -16);
659 }
660
661 // Lock object
662 //
663 // Args:
664 // c_rarg1: BasicObjectLock to be used for locking
665 //
666 // Kills:
667 // r0
668 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, .. (param regs)
669 // rscratch1, rscratch2 (scratch regs)
691 Label slow_case;
692
693 // Load object pointer into obj_reg %c_rarg3
694 ldr(obj_reg, Address(lock_reg, obj_offset));
695
696 if (DiagnoseSyncOnValueBasedClasses != 0) {
697 load_klass(tmp, obj_reg);
698 ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
699 tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
700 br(Assembler::NE, slow_case);
701 }
702
703 if (LockingMode == LM_LIGHTWEIGHT) {
704 ldr(tmp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
705 lightweight_lock(obj_reg, tmp, tmp2, tmp3, slow_case);
706 b(count);
707 } else if (LockingMode == LM_LEGACY) {
708 // Load (object->mark() | 1) into swap_reg
709 ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
710 orr(swap_reg, rscratch1, 1);
711
712 // Save (object->mark() | 1) into BasicLock's displaced header
713 str(swap_reg, Address(lock_reg, mark_offset));
714
715 assert(lock_offset == 0,
716 "displached header must be first word in BasicObjectLock");
717
718 Label fail;
719 cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
720
721 // Fast check for recursive lock.
722 //
723 // Can apply the optimization only if this is a stack lock
724 // allocated in this thread. For efficiency, we can focus on
725 // recently allocated stack locks (instead of reading the stack
726 // base and checking whether 'mark' points inside the current
727 // thread stack):
728 // 1) (mark & 7) == 0, and
729 // 2) sp <= mark < mark + os::pagesize()
730 //
1066 Address data(mdp, in_bytes(JumpData::taken_offset()));
1067 ldr(bumped_count, data);
1068 assert(DataLayout::counter_increment == 1,
1069 "flow-free idiom only works with 1");
1070 // Intel does this to catch overflow
1071 // addptr(bumped_count, DataLayout::counter_increment);
1072 // sbbptr(bumped_count, 0);
1073 // so we do this
1074 adds(bumped_count, bumped_count, DataLayout::counter_increment);
1075 Label L;
1076 br(Assembler::CS, L); // skip store if counter overflow
1077 str(bumped_count, data);
1078 bind(L);
1079 // The method data pointer needs to be updated to reflect the new target.
1080 update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1081 bind(profile_continue);
1082 }
1083 }
1084
1085
// Profile a conditional branch that fell through (was not taken): bump the
// not-taken counter in the current BranchData cell and advance the method
// data pointer past it. No-op unless the interpreter is profiling.
void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The branch was not taken. Increment the not-taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to
    // the next bytecode
    update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}
1102
1103
// Profile a (static-receiver) call site: bump the CounterData invocation
// count and step mdp past the CounterData cell. No-op unless profiling.
void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}
1401 // case_array_offset_in_bytes()
1402 movw(reg2, in_bytes(MultiBranchData::per_case_size()));
1403 movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
1404 Assembler::maddw(index, index, reg2, rscratch1);
1405
1406 // Update the case count
1407 increment_mdp_data_at(mdp,
1408 index,
1409 in_bytes(MultiBranchData::relative_count_offset()));
1410
1411 // The method data pointer needs to be updated.
1412 update_mdp_by_offset(mdp,
1413 index,
1414 in_bytes(MultiBranchData::
1415 relative_displacement_offset()));
1416
1417 bind(profile_continue);
1418 }
1419 }
1420
// Debug aid: verify that 'reg' holds a valid oop, but only when the TOS
// state says the top-of-stack value is an object (atos); for all other
// states the register carries a non-oop value and is left unchecked.
void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
  if (state == atos) {
    MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
  }
}
1426
// No-op on AArch64: there is no x87-style FPU register stack to verify.
void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }
1428
1429
1430 void InterpreterMacroAssembler::notify_method_entry() {
1431 // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1432 // track stack depth. If it is possible to enter interp_only_mode we add
1433 // the code to check if the event should be sent.
1434 if (JvmtiExport::can_post_interpreter_events()) {
1435 Label L;
1436 ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1437 cbzw(r3, L);
1438 call_VM(noreg, CAST_FROM_FN_PTR(address,
1439 InterpreterRuntime::post_method_entry));
1440 bind(L);
1655 profile_obj_type(tmp, mdo_arg_addr);
1656
1657 int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1658 off_to_args += to_add;
1659 }
1660
1661 if (MethodData::profile_return()) {
1662 ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
1663 sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
1664 }
1665
1666 add(rscratch1, mdp, off_to_args);
1667 bind(done);
1668 mov(mdp, rscratch1);
1669
1670 if (MethodData::profile_return()) {
1671 // We're right after the type profile for the last
1672 // argument. tmp is the number of cells left in the
1673 // CallTypeData/VirtualCallTypeData to reach its end. Non null
1674 // if there's a return to profile.
1675 assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
1676 add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
1677 }
1678 str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
1679 } else {
1680 assert(MethodData::profile_return(), "either profile call args or call ret");
1681 update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
1682 }
1683
1684 // mdp points right after the end of the
1685 // CallTypeData/VirtualCallTypeData, right after the cells for the
1686 // return value type if there's one
1687
1688 bind(profile_continue);
1689 }
1690 }
1691
1692 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
1693 assert_different_registers(mdp, ret, tmp, rbcp);
1694 if (ProfileInterpreter && MethodData::profile_return()) {
1695 Label profile_continue, done;
1701
1702 // If we don't profile all invoke bytecodes we must make sure
1703 // it's a bytecode we indeed profile. We can't go back to the
1704 // beginning of the ProfileData we intend to update to check its
1705 // type because we're right after it and we don't known its
1706 // length
1707 Label do_profile;
1708 ldrb(rscratch1, Address(rbcp, 0));
1709 cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
1710 br(Assembler::EQ, do_profile);
1711 cmp(rscratch1, (u1)Bytecodes::_invokehandle);
1712 br(Assembler::EQ, do_profile);
1713 get_method(tmp);
1714 ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset()));
1715 subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1716 br(Assembler::NE, profile_continue);
1717
1718 bind(do_profile);
1719 }
1720
1721 Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
1722 mov(tmp, ret);
1723 profile_obj_type(tmp, mdo_ret_addr);
1724
1725 bind(profile_continue);
1726 }
1727 }
1728
1729 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
1730 assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
1731 if (ProfileInterpreter && MethodData::profile_parameters()) {
1732 Label profile_continue, done;
1733
1734 test_method_data_pointer(mdp, profile_continue);
1735
1736 // Load the offset of the area within the MDO used for
1737 // parameters. If it's negative we're not profiling any parameters
1738 ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
1739 tbnz(tmp1, 31, profile_continue); // i.e. sign bit set
1740
1741 // Compute a pointer to the area for parameters from the offset
|
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "compiler/compiler_globals.hpp"
29 #include "gc/shared/barrierSet.hpp"
30 #include "gc/shared/barrierSetAssembler.hpp"
31 #include "interp_masm_aarch64.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "interpreter/interpreterRuntime.hpp"
34 #include "logging/log.hpp"
35 #include "oops/arrayOop.hpp"
36 #include "oops/constMethodFlags.hpp"
37 #include "oops/markWord.hpp"
38 #include "oops/method.hpp"
39 #include "oops/methodData.hpp"
40 #include "oops/inlineKlass.hpp"
41 #include "oops/resolvedFieldEntry.hpp"
42 #include "oops/resolvedIndyEntry.hpp"
43 #include "oops/resolvedMethodEntry.hpp"
44 #include "prims/jvmtiExport.hpp"
45 #include "prims/jvmtiThreadState.hpp"
46 #include "runtime/basicLock.hpp"
47 #include "runtime/frame.inline.hpp"
48 #include "runtime/javaThread.hpp"
49 #include "runtime/safepointMechanism.hpp"
50 #include "runtime/sharedRuntime.hpp"
51 #include "utilities/powerOfTwo.hpp"
52
53 void InterpreterMacroAssembler::narrow(Register result) {
54
55 // Get method->_constMethod->_result_type
56 ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
57 ldr(rscratch1, Address(rscratch1, Method::const_offset()));
58 ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
59
60 Label done, notBool, notByte, notChar;
199 eonw(index, index, zr); // convert to plain index
200 } else if (index_size == sizeof(u1)) {
201 load_unsigned_byte(index, Address(rbcp, bcp_offset));
202 } else {
203 ShouldNotReachHere();
204 }
205 }
206
// Load the MethodCounters of 'method' into 'mcs', allocating them lazily:
// if Method::_method_counters is still null, call into the VM to build them,
// then reload. Branches to 'skip' (with mcs == 0) if the allocation failed
// due to OutOfMemory.
void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbnz(mcs, has_counters);    // fast path: already allocated
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}
218
// Allocate an instance of 'klass' into 'new_obj' via the shared
// MacroAssembler fast path, then fire the dtrace object-alloc probe when
// DTraceAllocProbes is enabled. Branches to 'alloc_failed' if the fast-path
// allocation cannot be done. t1/t2 are temporaries.
void InterpreterMacroAssembler::allocate_instance(Register klass, Register new_obj,
                                                  Register t1, Register t2,
                                                  bool clear_fields, Label& alloc_failed) {
  MacroAssembler::allocate_instance(klass, new_obj, t1, t2, clear_fields, alloc_failed);
  {
    // Probe code is skipped entirely when DTraceAllocProbes == 0.
    SkipIfEqual skip_if(this, &DTraceAllocProbes, 0);
    // Trigger dtrace event for fastpath
    push(atos); // preserve the newly allocated oop across the leaf call
    call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), new_obj);
    pop(atos);
  }
}
231
// Read a flat (inlined) field out of 'obj' into a freshly buffered inline
// type instance, leaving the buffer oop in 'obj'.
//   holder_klass: klass of the field holder; saved/restored around the body
//   field_index:  index of the flat field within the holder
//   field_offset: byte offset of the field payload; aliased as 'src' below
//   temp:         temporary, aliased as 'dst_temp'
//   obj:          IN the holder oop, OUT the buffered inline-type oop
// Empty inline types get the canonical pre-allocated instance; if buffer
// allocation fails, falls back to a VM call that performs the whole read.
// NOTE(review): relies on matched push/pop pairs on all three paths — keep
// the stack discipline intact when modifying.
void InterpreterMacroAssembler::read_flat_field(Register holder_klass,
                                                Register field_index, Register field_offset,
                                                Register temp, Register obj) {
  Label alloc_failed, empty_value, done;
  const Register src = field_offset;
  const Register alloc_temp = rscratch1;
  const Register dst_temp = temp;
  assert_different_registers(obj, holder_klass, field_index, field_offset, dst_temp);

  // Grab the inline field klass; holder_klass is saved so it can be reused
  // as field_klass and restored before returning.
  push(holder_klass);
  const Register field_klass = holder_klass;
  get_inline_type_field_klass(holder_klass, field_index, field_klass);

  // Check for an empty value klass (no payload to copy).
  test_klass_is_empty_inline_type(field_klass, dst_temp, empty_value);

  // Allocate the buffer instance.
  push(obj); // save holder
  allocate_instance(field_klass, obj, alloc_temp, dst_temp, false, alloc_failed);

  // Have an oop instance buffer, copy into it
  data_for_oop(obj, dst_temp, field_klass); // dst_temp = payload addr in buffer
  pop(alloc_temp); // restore holder
  lea(src, Address(alloc_temp, field_offset));
  // call_VM_leaf, clobbers a few regs, save restore new obj
  push(obj);
  access_value_copy(IS_DEST_UNINITIALIZED, src, dst_temp, field_klass);
  pop(obj);
  pop(holder_klass);
  b(done);

  // Empty inline type: return the canonical default instance.
  bind(empty_value);
  get_empty_inline_type_oop(field_klass, dst_temp, obj);
  pop(holder_klass);
  b(done);

  // Slow path: buffer allocation failed; let the runtime do the read.
  bind(alloc_failed);
  pop(obj);
  pop(holder_klass);
  call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flat_field),
          obj, field_index, holder_klass);

  bind(done);

  // Ensure the stores to copy the inline field contents are visible
  // before any subsequent store that publishes this reference.
  membar(Assembler::StoreStore);
}
281
282 // Load object from cpool->resolved_references(index)
283 void InterpreterMacroAssembler::load_resolved_reference_at_index(
284 Register result, Register index, Register tmp) {
285 assert_different_registers(result, index);
286
287 get_constant_pool(result);
288 // load pointer for resolved_references[] objArray
289 ldr(result, Address(result, ConstantPool::cache_offset()));
290 ldr(result, Address(result, ConstantPoolCache::resolved_references_offset()));
291 resolve_oop_handle(result, tmp, rscratch2);
292 // Add in the index
293 add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
294 load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)), tmp, rscratch2);
295 }
296
// klass = cpool->resolved_klasses()->at(resolved_klass_index), where the
// resolved_klass_index is the u2 stored in constant-pool slot 'index'
// (slots live immediately after the ConstantPool header).
// 'index' is unchanged; 'temp' is clobbered.
void InterpreterMacroAssembler::load_resolved_klass_at_offset(
                                           Register cpool, Register index, Register klass, Register temp) {
  add(temp, cpool, index, LSL, LogBytesPerWord);
  ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
  ldr(klass, Address(cpool, ConstantPool::resolved_klasses_offset())); // klass = cpool->_resolved_klasses
  add(klass, klass, temp, LSL, LogBytesPerWord);
  ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
}
305
306 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
307 // subtype of super_klass.
308 //
309 // Args:
310 // r0: superklass
311 // Rsub_klass: subklass
312 //
313 // Kills:
314 // r2, r5
// Generate a subtype check: branch to ok_is_subtype if Rsub_klass is a
// subtype of the superklass in r0; fall through otherwise.
// 'profile' controls whether the typecheck is recorded in the MDO.
// Blows r2 and r5 (see the Args/Kills comment above).
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype,
                                                  bool profile) {
  // r0, r2, r5 are pinned by the profiling and subtype-check helpers below.
  assert(Rsub_klass != r0, "r0 holds superklass");
  assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
  assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");

  // Profile the not-null value's klass.
  if (profile) {
    profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5
  }

  // Do the check.
  check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
}
330
331 // Java Expression Stack
332
// Pop a reference (one stack slot) from the Java expression stack into r.
void InterpreterMacroAssembler::pop_ptr(Register r) {
  ldr(r, post(esp, wordSize)); // post-indexed load bumps esp past the slot
}
336
// Pop an int from the Java expression stack into r (32-bit load; the
// expression stack still advances by a full word-sized slot).
void InterpreterMacroAssembler::pop_i(Register r) {
  ldrw(r, post(esp, wordSize));
}
340
// Pop a long from the Java expression stack into r. Longs occupy two
// interpreter stack slots, so esp advances by two element sizes.
void InterpreterMacroAssembler::pop_l(Register r) {
  ldr(r, post(esp, 2 * Interpreter::stackElementSize));
}
344
345 void InterpreterMacroAssembler::push_ptr(Register r) {
673
674 add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
675 bind(entry);
676 cmp(c_rarg1, r19); // check if bottom reached
677 br(Assembler::NE, loop); // if not at bottom then check this entry
678 }
679
680 bind(no_unlock);
681
682 // jvmti support
683 if (notify_jvmdi) {
684 notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
685 } else {
686 notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
687 }
688
689 // remove activation
690 // get sender esp
691 ldr(rscratch2,
692 Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
693
694 if (StackReservedPages > 0) {
695 // testing if reserved zone needs to be re-enabled
696 Label no_reserved_zone_enabling;
697
698 // check if already enabled - if so no re-enabling needed
699 assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
700 ldrw(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
701 cmpw(rscratch1, (u1)StackOverflow::stack_guard_enabled);
702 br(Assembler::EQ, no_reserved_zone_enabling);
703
704 // look for an overflow into the stack reserved zone, i.e.
705 // interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
706 ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
707 cmp(rscratch2, rscratch1);
708 br(Assembler::LS, no_reserved_zone_enabling);
709
710 call_VM_leaf(
711 CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
712 call_VM(noreg, CAST_FROM_FN_PTR(address,
713 InterpreterRuntime::throw_delayed_StackOverflowError));
714 should_not_reach_here();
715
716 bind(no_reserved_zone_enabling);
717 }
718
719 if (state == atos && InlineTypeReturnedAsFields) {
720     // Check if we are returning a non-null inline type and load its fields into registers
721 Label skip;
722 test_oop_is_not_inline_type(r0, rscratch2, skip);
723
724 // Load fields from a buffered value with an inline class specific handler
725 load_klass(rscratch1 /*dst*/, r0 /*src*/);
726 ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
727 ldr(rscratch1, Address(rscratch1, InlineKlass::unpack_handler_offset()));
728 // Unpack handler can be null if inline type is not scalarizable in returns
729 cbz(rscratch1, skip);
730
731 blr(rscratch1);
732 #ifdef ASSERT
733 // TODO 8284443 Enable
734 if (StressCallingConvention && false) {
735 Label skip_stress;
736 ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
737 ldrw(rscratch1, Address(rscratch1, Method::flags_offset()));
738 tstw(rscratch1, ConstMethodFlags::has_scalarized_return_flag());
739 br(Assembler::EQ, skip_stress);
740 load_klass(r0, r0);
741 orr(r0, r0, 1);
742 bind(skip_stress);
743 }
744 #endif
745 bind(skip);
746 // Check above kills sender esp in rscratch2. Reload it.
747 ldr(rscratch2, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
748 }
749
750 // restore sender esp
751 mov(esp, rscratch2);
752 // remove frame anchor
753 leave();
754 // If we're returning to interpreted code we will shortly be
755 // adjusting SP to allow some space for ESP. If we're returning to
756 // compiled code the saved sender SP was saved in sender_sp, so this
757 // restores it.
758 andr(sp, esp, -16);
759 }
760
761 // Lock object
762 //
763 // Args:
764 // c_rarg1: BasicObjectLock to be used for locking
765 //
766 // Kills:
767 // r0
768 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, .. (param regs)
769 // rscratch1, rscratch2 (scratch regs)
791 Label slow_case;
792
793 // Load object pointer into obj_reg %c_rarg3
794 ldr(obj_reg, Address(lock_reg, obj_offset));
795
796 if (DiagnoseSyncOnValueBasedClasses != 0) {
797 load_klass(tmp, obj_reg);
798 ldrw(tmp, Address(tmp, Klass::access_flags_offset()));
799 tstw(tmp, JVM_ACC_IS_VALUE_BASED_CLASS);
800 br(Assembler::NE, slow_case);
801 }
802
803 if (LockingMode == LM_LIGHTWEIGHT) {
804 ldr(tmp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
805 lightweight_lock(obj_reg, tmp, tmp2, tmp3, slow_case);
806 b(count);
807 } else if (LockingMode == LM_LEGACY) {
808 // Load (object->mark() | 1) into swap_reg
809 ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
810 orr(swap_reg, rscratch1, 1);
811 if (EnableValhalla) {
812 // Mask inline_type bit such that we go to the slow path if object is an inline type
813 andr(swap_reg, swap_reg, ~((int) markWord::inline_type_bit_in_place));
814 }
815
816 // Save (object->mark() | 1) into BasicLock's displaced header
817 str(swap_reg, Address(lock_reg, mark_offset));
818
819 assert(lock_offset == 0,
820 "displached header must be first word in BasicObjectLock");
821
822 Label fail;
823 cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
824
825 // Fast check for recursive lock.
826 //
827 // Can apply the optimization only if this is a stack lock
828 // allocated in this thread. For efficiency, we can focus on
829 // recently allocated stack locks (instead of reading the stack
830 // base and checking whether 'mark' points inside the current
831 // thread stack):
832 // 1) (mark & 7) == 0, and
833 // 2) sp <= mark < mark + os::pagesize()
834 //
1170 Address data(mdp, in_bytes(JumpData::taken_offset()));
1171 ldr(bumped_count, data);
1172 assert(DataLayout::counter_increment == 1,
1173 "flow-free idiom only works with 1");
1174 // Intel does this to catch overflow
1175 // addptr(bumped_count, DataLayout::counter_increment);
1176 // sbbptr(bumped_count, 0);
1177 // so we do this
1178 adds(bumped_count, bumped_count, DataLayout::counter_increment);
1179 Label L;
1180 br(Assembler::CS, L); // skip store if counter overflow
1181 str(bumped_count, data);
1182 bind(L);
1183 // The method data pointer needs to be updated to reflect the new target.
1184 update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1185 bind(profile_continue);
1186 }
1187 }
1188
1189
// Profile a conditional branch that fell through (was not taken): bump the
// not-taken counter and advance the method data pointer past the profile
// cell. 'acmp' selects the larger ACmpData layout used for if_acmp bytecodes
// (which also profile their operands). No-op unless profiling.
void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp, bool acmp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The branch was not taken. Increment the not-taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to
    // the next bytecode
    update_mdp_by_constant(mdp, acmp ? in_bytes(ACmpData::acmp_data_size()) : in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}
1206
1207
// Profile a (static-receiver) call site: bump the CounterData invocation
// count and step mdp past the CounterData cell. No-op unless profiling.
void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}
1505 // case_array_offset_in_bytes()
1506 movw(reg2, in_bytes(MultiBranchData::per_case_size()));
1507 movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
1508 Assembler::maddw(index, index, reg2, rscratch1);
1509
1510 // Update the case count
1511 increment_mdp_data_at(mdp,
1512 index,
1513 in_bytes(MultiBranchData::relative_count_offset()));
1514
1515 // The method data pointer needs to be updated.
1516 update_mdp_by_offset(mdp,
1517 index,
1518 in_bytes(MultiBranchData::
1519 relative_displacement_offset()));
1520
1521 bind(profile_continue);
1522 }
1523 }
1524
// Profile the array operand of an array load/store: record the array's
// klass in the ArrayData cell and set flags when the array is flat and/or
// null-free. ArrayData is ArrayLoadData or ArrayStoreData (see the explicit
// instantiations below). 'tmp' is clobbered; mdp is NOT advanced here.
template <class ArrayData> void InterpreterMacroAssembler::profile_array_type(Register mdp,
                                                                              Register array,
                                                                              Register tmp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    mov(tmp, array); // profile_obj_type clobbers its input
    profile_obj_type(tmp, Address(mdp, in_bytes(ArrayData::array_offset())));

    // Flag the MDO entry if the array layout is flat.
    Label not_flat;
    test_non_flat_array_oop(array, tmp, not_flat);

    set_mdp_flag_at(mdp, ArrayData::flat_array_byte_constant());

    bind(not_flat);

    // Flag the MDO entry if the array rejects null elements.
    Label not_null_free;
    test_non_null_free_array_oop(array, tmp, not_null_free);

    set_mdp_flag_at(mdp, ArrayData::null_free_array_byte_constant());

    bind(not_null_free);

    bind(profile_continue);
  }
}
1554
// Explicit instantiations: array-type profiling is emitted for both array
// loads (ArrayLoadData) and array stores (ArrayStoreData).
template void InterpreterMacroAssembler::profile_array_type<ArrayLoadData>(Register mdp,
                                                                           Register array,
                                                                           Register tmp);
template void InterpreterMacroAssembler::profile_array_type<ArrayStoreData>(Register mdp,
                                                                            Register array,
                                                                            Register tmp);
1561
// Profile the element stored by an array store: record null_seen when the
// element is null, otherwise record the element's klass in the receiver-type
// rows; then advance mdp past the ArrayStoreData cell.
// tmp/tmp2 are clobbered.
void InterpreterMacroAssembler::profile_multiple_element_types(Register mdp, Register element, Register tmp, const Register tmp2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    Label done, update;
    cbnz(element, update);
    // Null element: only the null_seen flag is recorded.
    set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());
    b(done);

    bind(update);
    load_klass(tmp, element);

    // Record the object type.
    record_klass_in_profile(tmp, mdp, tmp2);

    bind(done);

    // The method data pointer needs to be updated.
    update_mdp_by_constant(mdp, in_bytes(ArrayStoreData::array_store_data_size()));

    bind(profile_continue);
  }
}
1588
1589
1590 void InterpreterMacroAssembler::profile_element_type(Register mdp,
1591 Register element,
1592 Register tmp) {
1593 if (ProfileInterpreter) {
1594 Label profile_continue;
1595
1596 // If no method data exists, go to profile_continue.
1597 test_method_data_pointer(mdp, profile_continue);
1598
1599 mov(tmp, element);
1600 profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadData::element_offset())));
1601
1602 // The method data pointer needs to be updated.
1603 update_mdp_by_constant(mdp, in_bytes(ArrayLoadData::array_load_data_size()));
1604
1605 bind(profile_continue);
1606 }
1607 }
1608
1609 void InterpreterMacroAssembler::profile_acmp(Register mdp,
1610 Register left,
1611 Register right,
1612 Register tmp) {
1613 if (ProfileInterpreter) {
1614 Label profile_continue;
1615
1616 // If no method data exists, go to profile_continue.
1617 test_method_data_pointer(mdp, profile_continue);
1618
1619 mov(tmp, left);
1620 profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::left_offset())));
1621
1622 Label left_not_inline_type;
1623 test_oop_is_not_inline_type(left, tmp, left_not_inline_type);
1624 set_mdp_flag_at(mdp, ACmpData::left_inline_type_byte_constant());
1625 bind(left_not_inline_type);
1626
1627 mov(tmp, right);
1628 profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::right_offset())));
1629
1630 Label right_not_inline_type;
1631 test_oop_is_not_inline_type(right, tmp, right_not_inline_type);
1632 set_mdp_flag_at(mdp, ACmpData::right_inline_type_byte_constant());
1633 bind(right_not_inline_type);
1634
1635 bind(profile_continue);
1636 }
1637 }
1638
1639 void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
1640 if (state == atos) {
1641 MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
1642 }
1643 }
1644
1645 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }
1646
1647
1648 void InterpreterMacroAssembler::notify_method_entry() {
1649 // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1650 // track stack depth. If it is possible to enter interp_only_mode we add
1651 // the code to check if the event should be sent.
1652 if (JvmtiExport::can_post_interpreter_events()) {
1653 Label L;
1654 ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1655 cbzw(r3, L);
1656 call_VM(noreg, CAST_FROM_FN_PTR(address,
1657 InterpreterRuntime::post_method_entry));
1658 bind(L);
1873 profile_obj_type(tmp, mdo_arg_addr);
1874
1875 int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1876 off_to_args += to_add;
1877 }
1878
1879 if (MethodData::profile_return()) {
1880 ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
1881 sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
1882 }
1883
1884 add(rscratch1, mdp, off_to_args);
1885 bind(done);
1886 mov(mdp, rscratch1);
1887
1888 if (MethodData::profile_return()) {
1889 // We're right after the type profile for the last
1890 // argument. tmp is the number of cells left in the
1891 // CallTypeData/VirtualCallTypeData to reach its end. Non null
1892 // if there's a return to profile.
1893 assert(SingleTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
1894 add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
1895 }
1896 str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
1897 } else {
1898 assert(MethodData::profile_return(), "either profile call args or call ret");
1899 update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
1900 }
1901
1902 // mdp points right after the end of the
1903 // CallTypeData/VirtualCallTypeData, right after the cells for the
1904 // return value type if there's one
1905
1906 bind(profile_continue);
1907 }
1908 }
1909
1910 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
1911 assert_different_registers(mdp, ret, tmp, rbcp);
1912 if (ProfileInterpreter && MethodData::profile_return()) {
1913 Label profile_continue, done;
1919
1920 // If we don't profile all invoke bytecodes we must make sure
1921 // it's a bytecode we indeed profile. We can't go back to the
1922 // beginning of the ProfileData we intend to update to check its
1923 // type because we're right after it and we don't known its
1924 // length
1925 Label do_profile;
1926 ldrb(rscratch1, Address(rbcp, 0));
1927 cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
1928 br(Assembler::EQ, do_profile);
1929 cmp(rscratch1, (u1)Bytecodes::_invokehandle);
1930 br(Assembler::EQ, do_profile);
1931 get_method(tmp);
1932 ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset()));
1933 subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1934 br(Assembler::NE, profile_continue);
1935
1936 bind(do_profile);
1937 }
1938
1939 Address mdo_ret_addr(mdp, -in_bytes(SingleTypeEntry::size()));
1940 mov(tmp, ret);
1941 profile_obj_type(tmp, mdo_ret_addr);
1942
1943 bind(profile_continue);
1944 }
1945 }
1946
1947 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
1948 assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
1949 if (ProfileInterpreter && MethodData::profile_parameters()) {
1950 Label profile_continue, done;
1951
1952 test_method_data_pointer(mdp, profile_continue);
1953
1954 // Load the offset of the area within the MDO used for
1955 // parameters. If it's negative we're not profiling any parameters
1956 ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
1957 tbnz(tmp1, 31, profile_continue); // i.e. sign bit set
1958
1959 // Compute a pointer to the area for parameters from the offset
|