16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "compiler/compiler_globals.hpp"
29 #include "gc/shared/barrierSet.hpp"
30 #include "gc/shared/barrierSetAssembler.hpp"
31 #include "interp_masm_aarch64.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "interpreter/interpreterRuntime.hpp"
34 #include "logging/log.hpp"
35 #include "oops/arrayOop.hpp"
36 #include "oops/markWord.hpp"
37 #include "oops/method.hpp"
38 #include "oops/methodData.hpp"
39 #include "oops/resolvedFieldEntry.hpp"
40 #include "oops/resolvedIndyEntry.hpp"
41 #include "oops/resolvedMethodEntry.hpp"
42 #include "prims/jvmtiExport.hpp"
43 #include "prims/jvmtiThreadState.hpp"
44 #include "runtime/basicLock.hpp"
45 #include "runtime/frame.inline.hpp"
46 #include "runtime/javaThread.hpp"
47 #include "runtime/safepointMechanism.hpp"
48 #include "runtime/sharedRuntime.hpp"
49 #include "utilities/powerOfTwo.hpp"
50
51 void InterpreterMacroAssembler::narrow(Register result) {
52
53 // Get method->_constMethod->_result_type
54 ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
55 ldr(rscratch1, Address(rscratch1, Method::const_offset()));
56 ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
57
58 Label done, notBool, notByte, notChar;
192 ldrw(index, Address(rbcp, bcp_offset));
193 } else if (index_size == sizeof(u1)) {
194 load_unsigned_byte(index, Address(rbcp, bcp_offset));
195 } else {
196 ShouldNotReachHere();
197 }
198 }
199
void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  // Load method->_method_counters into 'mcs'. If they have not been allocated
  // yet, call into the runtime to build them, then reload. If allocation
  // failed (OutOfMemory), 'mcs' is zero and we branch to 'skip'.
  Label has_counters;
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbnz(mcs, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}
211
// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index, Register tmp) {
  // result - out: the resolved reference oop
  // index  - element index; clobbered (biased by the array header below)
  // tmp    - scratch for resolve_oop_handle / load_heap_oop
  assert_different_registers(result, index);

  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  ldr(result, Address(result, ConstantPool::cache_offset()));
  ldr(result, Address(result, ConstantPoolCache::resolved_references_offset()));
  resolve_oop_handle(result, tmp, rscratch2);
  // Add in the index: bias 'index' by the objArray header (in heap-oop units)
  // so the scaled addressing mode below lands on resolved_references[index].
  add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)), tmp, rscratch2);
}
226
void InterpreterMacroAssembler::load_resolved_klass_at_offset(
                                           Register cpool, Register index, Register klass, Register temp) {
  // Load into 'klass' the Klass* resolved at constant-pool slot 'index':
  // read the u2 resolved_klass_index stored just past the ConstantPool
  // header, then index cpool->_resolved_klasses with it.
  add(temp, cpool, index, LSL, LogBytesPerWord);
  ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
  ldr(klass, Address(cpool, ConstantPool::resolved_klasses_offset())); // klass = cpool->_resolved_klasses
  add(klass, klass, temp, LSL, LogBytesPerWord);
  ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
}
235
// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass. Falls through otherwise.
//
// Args:
//      r0: superklass
//      Rsub_klass: subklass
//
// Kills:
//      r2, r5
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype) {
  assert(Rsub_klass != r0, "r0 holds superklass");
  assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
  assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");

  // Profile the not-null value's klass.
  profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5

  // Do the check.
  check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
}
257
// Java Expression Stack

void InterpreterMacroAssembler::pop_ptr(Register r) {
  // Pop one machine word (a reference/address slot) off the expression stack.
  ldr(r, post(esp, wordSize));
}
263
void InterpreterMacroAssembler::pop_i(Register r) {
  // Pop a 32-bit int; the slot still occupies a full word on the stack.
  ldrw(r, post(esp, wordSize));
}
267
void InterpreterMacroAssembler::pop_l(Register r) {
  // Pop a 64-bit long, which occupies two expression-stack slots.
  ldr(r, post(esp, 2 * Interpreter::stackElementSize));
}
271
272 void InterpreterMacroAssembler::push_ptr(Register r) {
600
601 add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
602 bind(entry);
603 cmp(c_rarg1, r19); // check if bottom reached
604 br(Assembler::NE, loop); // if not at bottom then check this entry
605 }
606
607 bind(no_unlock);
608
609 // jvmti support
610 if (notify_jvmdi) {
611 notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
612 } else {
613 notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
614 }
615
616 // remove activation
617 // get sender esp
618 ldr(rscratch2,
619 Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
620 if (StackReservedPages > 0) {
621 // testing if reserved zone needs to be re-enabled
622 Label no_reserved_zone_enabling;
623
624 // check if already enabled - if so no re-enabling needed
625 assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
626 ldrw(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
627 cmpw(rscratch1, (u1)StackOverflow::stack_guard_enabled);
628 br(Assembler::EQ, no_reserved_zone_enabling);
629
630 // look for an overflow into the stack reserved zone, i.e.
631 // interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
632 ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
633 cmp(rscratch2, rscratch1);
634 br(Assembler::LS, no_reserved_zone_enabling);
635
636 call_VM_leaf(
637 CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
638 call_VM(noreg, CAST_FROM_FN_PTR(address,
639 InterpreterRuntime::throw_delayed_StackOverflowError));
640 should_not_reach_here();
641
642 bind(no_reserved_zone_enabling);
643 }
644
645 // restore sender esp
646 mov(esp, rscratch2);
647 // remove frame anchor
648 leave();
649 // If we're returning to interpreted code we will shortly be
650 // adjusting SP to allow some space for ESP. If we're returning to
651 // compiled code the saved sender SP was saved in sender_sp, so this
652 // restores it.
653 andr(sp, esp, -16);
654 }
655
656 // Lock object
657 //
658 // Args:
659 // c_rarg1: BasicObjectLock to be used for locking
660 //
661 // Kills:
662 // r0
663 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, .. (param regs)
664 // rscratch1, rscratch2 (scratch regs)
685
686 Label slow_case;
687
688 // Load object pointer into obj_reg %c_rarg3
689 ldr(obj_reg, Address(lock_reg, obj_offset));
690
691 if (DiagnoseSyncOnValueBasedClasses != 0) {
692 load_klass(tmp, obj_reg);
693 ldrb(tmp, Address(tmp, Klass::misc_flags_offset()));
694 tst(tmp, KlassFlags::_misc_is_value_based_class);
695 br(Assembler::NE, slow_case);
696 }
697
698 if (LockingMode == LM_LIGHTWEIGHT) {
699 lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
700 b(count);
701 } else if (LockingMode == LM_LEGACY) {
702 // Load (object->mark() | 1) into swap_reg
703 ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
704 orr(swap_reg, rscratch1, 1);
705
706 // Save (object->mark() | 1) into BasicLock's displaced header
707 str(swap_reg, Address(lock_reg, mark_offset));
708
709 assert(lock_offset == 0,
710 "displached header must be first word in BasicObjectLock");
711
712 Label fail;
713 cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
714
715 // Fast check for recursive lock.
716 //
717 // Can apply the optimization only if this is a stack lock
718 // allocated in this thread. For efficiency, we can focus on
719 // recently allocated stack locks (instead of reading the stack
720 // base and checking whether 'mark' points inside the current
721 // thread stack):
722 // 1) (mark & 7) == 0, and
723 // 2) sp <= mark < mark + os::pagesize()
724 //
1038 Address data(mdp, in_bytes(JumpData::taken_offset()));
1039 ldr(bumped_count, data);
1040 assert(DataLayout::counter_increment == 1,
1041 "flow-free idiom only works with 1");
1042 // Intel does this to catch overflow
1043 // addptr(bumped_count, DataLayout::counter_increment);
1044 // sbbptr(bumped_count, 0);
1045 // so we do this
1046 adds(bumped_count, bumped_count, DataLayout::counter_increment);
1047 Label L;
1048 br(Assembler::CS, L); // skip store if counter overflow
1049 str(bumped_count, data);
1050 bind(L);
1051 // The method data pointer needs to be updated to reflect the new target.
1052 update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1053 bind(profile_continue);
1054 }
1055 }
1056
1057
void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  // Bump the "branch not taken" counter in the MethodData and advance mdp
  // past this bytecode's BranchData entry.
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are not taking the branch. Increment the not-taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to
    // the next bytecode
    update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}
1074
1075
void InterpreterMacroAssembler::profile_call(Register mdp) {
  // Bump the invocation counter in this call site's CounterData and advance
  // mdp past the entry.
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}
1373 // case_array_offset_in_bytes()
1374 movw(reg2, in_bytes(MultiBranchData::per_case_size()));
1375 movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
1376 Assembler::maddw(index, index, reg2, rscratch1);
1377
1378 // Update the case count
1379 increment_mdp_data_at(mdp,
1380 index,
1381 in_bytes(MultiBranchData::relative_count_offset()));
1382
1383 // The method data pointer needs to be updated.
1384 update_mdp_by_offset(mdp,
1385 index,
1386 in_bytes(MultiBranchData::
1387 relative_displacement_offset()));
1388
1389 bind(profile_continue);
1390 }
1391 }
1392
1393 void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
1394 if (state == atos) {
1395 MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
1396 }
1397 }
1398
// Intentionally empty: no FPU state verification is performed on this platform.
void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }
1400
1401
1402 void InterpreterMacroAssembler::notify_method_entry() {
1403 // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1404 // track stack depth. If it is possible to enter interp_only_mode we add
1405 // the code to check if the event should be sent.
1406 if (JvmtiExport::can_post_interpreter_events()) {
1407 Label L;
1408 ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1409 cbzw(r3, L);
1410 call_VM(noreg, CAST_FROM_FN_PTR(address,
1411 InterpreterRuntime::post_method_entry));
1412 bind(L);
1625 profile_obj_type(tmp, mdo_arg_addr);
1626
1627 int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1628 off_to_args += to_add;
1629 }
1630
1631 if (MethodData::profile_return()) {
1632 ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
1633 sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
1634 }
1635
1636 add(rscratch1, mdp, off_to_args);
1637 bind(done);
1638 mov(mdp, rscratch1);
1639
1640 if (MethodData::profile_return()) {
1641 // We're right after the type profile for the last
1642 // argument. tmp is the number of cells left in the
1643 // CallTypeData/VirtualCallTypeData to reach its end. Non null
1644 // if there's a return to profile.
1645 assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
1646 add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
1647 }
1648 str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
1649 } else {
1650 assert(MethodData::profile_return(), "either profile call args or call ret");
1651 update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
1652 }
1653
1654 // mdp points right after the end of the
1655 // CallTypeData/VirtualCallTypeData, right after the cells for the
1656 // return value type if there's one
1657
1658 bind(profile_continue);
1659 }
1660 }
1661
1662 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
1663 assert_different_registers(mdp, ret, tmp, rbcp);
1664 if (ProfileInterpreter && MethodData::profile_return()) {
1665 Label profile_continue, done;
1671
1672 // If we don't profile all invoke bytecodes we must make sure
1673 // it's a bytecode we indeed profile. We can't go back to the
1674 // beginning of the ProfileData we intend to update to check its
1675 // type because we're right after it and we don't known its
1676 // length
1677 Label do_profile;
1678 ldrb(rscratch1, Address(rbcp, 0));
1679 cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
1680 br(Assembler::EQ, do_profile);
1681 cmp(rscratch1, (u1)Bytecodes::_invokehandle);
1682 br(Assembler::EQ, do_profile);
1683 get_method(tmp);
1684 ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset()));
1685 subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1686 br(Assembler::NE, profile_continue);
1687
1688 bind(do_profile);
1689 }
1690
1691 Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
1692 mov(tmp, ret);
1693 profile_obj_type(tmp, mdo_ret_addr);
1694
1695 bind(profile_continue);
1696 }
1697 }
1698
1699 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
1700 assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
1701 if (ProfileInterpreter && MethodData::profile_parameters()) {
1702 Label profile_continue, done;
1703
1704 test_method_data_pointer(mdp, profile_continue);
1705
1706 // Load the offset of the area within the MDO used for
1707 // parameters. If it's negative we're not profiling any parameters
1708 ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
1709 tbnz(tmp1, 31, profile_continue); // i.e. sign bit set
1710
1711 // Compute a pointer to the area for parameters from the offset
|
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "compiler/compiler_globals.hpp"
29 #include "gc/shared/barrierSet.hpp"
30 #include "gc/shared/barrierSetAssembler.hpp"
31 #include "interp_masm_aarch64.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "interpreter/interpreterRuntime.hpp"
34 #include "logging/log.hpp"
35 #include "oops/arrayOop.hpp"
36 #include "oops/constMethodFlags.hpp"
37 #include "oops/markWord.hpp"
38 #include "oops/method.hpp"
39 #include "oops/methodData.hpp"
40 #include "oops/inlineKlass.hpp"
41 #include "oops/resolvedFieldEntry.hpp"
42 #include "oops/resolvedIndyEntry.hpp"
43 #include "oops/resolvedMethodEntry.hpp"
44 #include "prims/jvmtiExport.hpp"
45 #include "prims/jvmtiThreadState.hpp"
46 #include "runtime/basicLock.hpp"
47 #include "runtime/frame.inline.hpp"
48 #include "runtime/javaThread.hpp"
49 #include "runtime/safepointMechanism.hpp"
50 #include "runtime/sharedRuntime.hpp"
51 #include "utilities/powerOfTwo.hpp"
52
53 void InterpreterMacroAssembler::narrow(Register result) {
54
55 // Get method->_constMethod->_result_type
56 ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
57 ldr(rscratch1, Address(rscratch1, Method::const_offset()));
58 ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
59
60 Label done, notBool, notByte, notChar;
194 ldrw(index, Address(rbcp, bcp_offset));
195 } else if (index_size == sizeof(u1)) {
196 load_unsigned_byte(index, Address(rbcp, bcp_offset));
197 } else {
198 ShouldNotReachHere();
199 }
200 }
201
void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  // Load method->_method_counters into 'mcs'. If they have not been allocated
  // yet, call into the runtime to build them, then reload. If allocation
  // failed (OutOfMemory), 'mcs' is zero and we branch to 'skip'.
  Label has_counters;
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbnz(mcs, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}
213
void InterpreterMacroAssembler::allocate_instance(Register klass, Register new_obj,
                                                  Register t1, Register t2,
                                                  bool clear_fields, Label& alloc_failed) {
  // Fast-path allocation of an instance of 'klass' into 'new_obj'; branches
  // to 'alloc_failed' if the MacroAssembler fast path cannot allocate.
  // When DTraceAllocProbes is enabled, reports the allocation to
  // SharedRuntime::dtrace_object_alloc.
  MacroAssembler::allocate_instance(klass, new_obj, t1, t2, clear_fields, alloc_failed);
  {
    SkipIfEqual skip_if(this, &DTraceAllocProbes, 0);
    // Trigger dtrace event for fastpath
    push(atos); // preserve tos state across the leaf call
    call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), new_obj);
    pop(atos);
  }
}
226
void InterpreterMacroAssembler::read_flat_field(Register entry,
                                                Register field_index, Register field_offset,
                                                Register temp, Register obj) {
  // Read a flat (inline-type) field out of the holder 'obj' into a freshly
  // allocated buffered instance, which is returned in 'obj'.
  //   entry        - ResolvedFieldEntry for the field
  //   field_index  - field index (clobbered: aliased as dst_temp/field_klass)
  //   field_offset - byte offset of the field in the holder (clobbered: src)
  //   temp         - scratch, holds the InlineLayoutInfo
  //   obj          - in: holder; out: buffered inline-type oop
  // Falls back to InterpreterRuntime::read_flat_field if fast-path
  // allocation fails.
  Label alloc_failed, empty_value, done;
  const Register src = field_offset;
  const Register alloc_temp = r10;
  const Register dst_temp = field_index;
  const Register layout_info = temp;
  assert_different_registers(obj, entry, field_index, field_offset, temp, alloc_temp);

  // Grab the inline field klass
  ldr(rscratch1, Address(entry, in_bytes(ResolvedFieldEntry::field_holder_offset())));
  inline_layout_info(rscratch1, field_index, layout_info);

  const Register field_klass = dst_temp;
  ldr(field_klass, Address(layout_info, in_bytes(InlineLayoutInfo::klass_offset())));

  // check for empty value klass: no payload to copy, use the canonical
  // empty-value oop instead of allocating.
  test_klass_is_empty_inline_type(field_klass, rscratch1, empty_value);

  // allocate buffer
  push(obj); // save holder
  allocate_instance(field_klass, obj, alloc_temp, rscratch2, false, alloc_failed);

  // Have an oop instance buffer, copy into it
  data_for_oop(obj, dst_temp, field_klass); // danger, uses rscratch1
  pop(alloc_temp); // restore holder
  lea(src, Address(alloc_temp, field_offset));
  // call_VM_leaf, clobbers a few regs, save restore new obj
  push(obj);
  flat_field_copy(IS_DEST_UNINITIALIZED, src, dst_temp, layout_info);
  pop(obj);
  b(done);

  bind(empty_value);
  get_empty_inline_type_oop(field_klass, alloc_temp, obj);
  b(done);

  bind(alloc_failed);
  pop(obj); // restore holder pushed before the allocation attempt
  call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flat_field),
          obj, entry);

  bind(done);
  // Publish the copied field values before the new oop becomes visible.
  membar(Assembler::StoreStore);
}
273
// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index, Register tmp) {
  // result - out: the resolved reference oop
  // index  - element index; clobbered (biased by the array header below)
  // tmp    - scratch for resolve_oop_handle / load_heap_oop
  assert_different_registers(result, index);

  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  ldr(result, Address(result, ConstantPool::cache_offset()));
  ldr(result, Address(result, ConstantPoolCache::resolved_references_offset()));
  resolve_oop_handle(result, tmp, rscratch2);
  // Add in the index: bias 'index' by the objArray header (in heap-oop units)
  // so the scaled addressing mode below lands on resolved_references[index].
  add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)), tmp, rscratch2);
}
288
void InterpreterMacroAssembler::load_resolved_klass_at_offset(
                                           Register cpool, Register index, Register klass, Register temp) {
  // Load into 'klass' the Klass* resolved at constant-pool slot 'index':
  // read the u2 resolved_klass_index stored just past the ConstantPool
  // header, then index cpool->_resolved_klasses with it.
  add(temp, cpool, index, LSL, LogBytesPerWord);
  ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
  ldr(klass, Address(cpool, ConstantPool::resolved_klasses_offset())); // klass = cpool->_resolved_klasses
  add(klass, klass, temp, LSL, LogBytesPerWord);
  ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
}
297
// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass. Falls through otherwise.
//
// Args:
//      r0: superklass
//      Rsub_klass: subklass
//      profile: record the observed klass in the MethodData when true
//
// Kills:
//      r2, r5
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype,
                                                  bool profile) {
  assert(Rsub_klass != r0, "r0 holds superklass");
  assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
  assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");

  // Profile the not-null value's klass, unless the caller opted out.
  if (profile) {
    profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5
  }

  // Do the check.
  check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
}
322
// Java Expression Stack

void InterpreterMacroAssembler::pop_ptr(Register r) {
  // Pop one machine word (a reference/address slot) off the expression stack.
  ldr(r, post(esp, wordSize));
}
328
void InterpreterMacroAssembler::pop_i(Register r) {
  // Pop a 32-bit int; the slot still occupies a full word on the stack.
  ldrw(r, post(esp, wordSize));
}
332
void InterpreterMacroAssembler::pop_l(Register r) {
  // Pop a 64-bit long, which occupies two expression-stack slots.
  ldr(r, post(esp, 2 * Interpreter::stackElementSize));
}
336
337 void InterpreterMacroAssembler::push_ptr(Register r) {
665
666 add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
667 bind(entry);
668 cmp(c_rarg1, r19); // check if bottom reached
669 br(Assembler::NE, loop); // if not at bottom then check this entry
670 }
671
672 bind(no_unlock);
673
674 // jvmti support
675 if (notify_jvmdi) {
676 notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
677 } else {
678 notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
679 }
680
681 // remove activation
682 // get sender esp
683 ldr(rscratch2,
684 Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
685
686 if (StackReservedPages > 0) {
687 // testing if reserved zone needs to be re-enabled
688 Label no_reserved_zone_enabling;
689
690 // check if already enabled - if so no re-enabling needed
691 assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
692 ldrw(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
693 cmpw(rscratch1, (u1)StackOverflow::stack_guard_enabled);
694 br(Assembler::EQ, no_reserved_zone_enabling);
695
696 // look for an overflow into the stack reserved zone, i.e.
697 // interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
698 ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
699 cmp(rscratch2, rscratch1);
700 br(Assembler::LS, no_reserved_zone_enabling);
701
702 call_VM_leaf(
703 CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
704 call_VM(noreg, CAST_FROM_FN_PTR(address,
705 InterpreterRuntime::throw_delayed_StackOverflowError));
706 should_not_reach_here();
707
708 bind(no_reserved_zone_enabling);
709 }
710
711 if (state == atos && InlineTypeReturnedAsFields) {
712 // Check if we are returning an non-null inline type and load its fields into registers
713 Label skip;
714 test_oop_is_not_inline_type(r0, rscratch2, skip);
715
716 // Load fields from a buffered value with an inline class specific handler
717 load_klass(rscratch1 /*dst*/, r0 /*src*/);
718 ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
719 ldr(rscratch1, Address(rscratch1, InlineKlass::unpack_handler_offset()));
720 // Unpack handler can be null if inline type is not scalarizable in returns
721 cbz(rscratch1, skip);
722
723 blr(rscratch1);
724 #ifdef ASSERT
725 // TODO 8284443 Enable
726 if (StressCallingConvention && false) {
727 Label skip_stress;
728 ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
729 ldrw(rscratch1, Address(rscratch1, Method::flags_offset()));
730 tstw(rscratch1, MethodFlags::has_scalarized_return_flag());
731 br(Assembler::EQ, skip_stress);
732 load_klass(r0, r0);
733 orr(r0, r0, 1);
734 bind(skip_stress);
735 }
736 #endif
737 bind(skip);
738 // Check above kills sender esp in rscratch2. Reload it.
739 ldr(rscratch2, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
740 }
741
742 // restore sender esp
743 mov(esp, rscratch2);
744 // remove frame anchor
745 leave();
746 // If we're returning to interpreted code we will shortly be
747 // adjusting SP to allow some space for ESP. If we're returning to
748 // compiled code the saved sender SP was saved in sender_sp, so this
749 // restores it.
750 andr(sp, esp, -16);
751 }
752
753 // Lock object
754 //
755 // Args:
756 // c_rarg1: BasicObjectLock to be used for locking
757 //
758 // Kills:
759 // r0
760 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, .. (param regs)
761 // rscratch1, rscratch2 (scratch regs)
782
783 Label slow_case;
784
785 // Load object pointer into obj_reg %c_rarg3
786 ldr(obj_reg, Address(lock_reg, obj_offset));
787
788 if (DiagnoseSyncOnValueBasedClasses != 0) {
789 load_klass(tmp, obj_reg);
790 ldrb(tmp, Address(tmp, Klass::misc_flags_offset()));
791 tst(tmp, KlassFlags::_misc_is_value_based_class);
792 br(Assembler::NE, slow_case);
793 }
794
795 if (LockingMode == LM_LIGHTWEIGHT) {
796 lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
797 b(count);
798 } else if (LockingMode == LM_LEGACY) {
799 // Load (object->mark() | 1) into swap_reg
800 ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
801 orr(swap_reg, rscratch1, 1);
802 if (EnableValhalla) {
803 // Mask inline_type bit such that we go to the slow path if object is an inline type
804 andr(swap_reg, swap_reg, ~((int) markWord::inline_type_bit_in_place));
805 }
806
807 // Save (object->mark() | 1) into BasicLock's displaced header
808 str(swap_reg, Address(lock_reg, mark_offset));
809
810 assert(lock_offset == 0,
811 "displached header must be first word in BasicObjectLock");
812
813 Label fail;
814 cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
815
816 // Fast check for recursive lock.
817 //
818 // Can apply the optimization only if this is a stack lock
819 // allocated in this thread. For efficiency, we can focus on
820 // recently allocated stack locks (instead of reading the stack
821 // base and checking whether 'mark' points inside the current
822 // thread stack):
823 // 1) (mark & 7) == 0, and
824 // 2) sp <= mark < mark + os::pagesize()
825 //
1139 Address data(mdp, in_bytes(JumpData::taken_offset()));
1140 ldr(bumped_count, data);
1141 assert(DataLayout::counter_increment == 1,
1142 "flow-free idiom only works with 1");
1143 // Intel does this to catch overflow
1144 // addptr(bumped_count, DataLayout::counter_increment);
1145 // sbbptr(bumped_count, 0);
1146 // so we do this
1147 adds(bumped_count, bumped_count, DataLayout::counter_increment);
1148 Label L;
1149 br(Assembler::CS, L); // skip store if counter overflow
1150 str(bumped_count, data);
1151 bind(L);
1152 // The method data pointer needs to be updated to reflect the new target.
1153 update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1154 bind(profile_continue);
1155 }
1156 }
1157
1158
void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp, bool acmp) {
  // Bump the "branch not taken" counter in the MethodData, then advance mdp
  // past the profiling entry — an ACmpData entry when 'acmp' is true
  // (if_acmpeq/ne), otherwise a plain BranchData entry.
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are not taking the branch. Increment the not-taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to
    // the next bytecode
    update_mdp_by_constant(mdp, acmp ? in_bytes(ACmpData::acmp_data_size()) : in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}
1175
1176
void InterpreterMacroAssembler::profile_call(Register mdp) {
  // Bump the invocation counter in this call site's CounterData and advance
  // mdp past the entry.
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}
1474 // case_array_offset_in_bytes()
1475 movw(reg2, in_bytes(MultiBranchData::per_case_size()));
1476 movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
1477 Assembler::maddw(index, index, reg2, rscratch1);
1478
1479 // Update the case count
1480 increment_mdp_data_at(mdp,
1481 index,
1482 in_bytes(MultiBranchData::relative_count_offset()));
1483
1484 // The method data pointer needs to be updated.
1485 update_mdp_by_offset(mdp,
1486 index,
1487 in_bytes(MultiBranchData::
1488 relative_displacement_offset()));
1489
1490 bind(profile_continue);
1491 }
1492 }
1493
// Record the observed array klass plus its flat / null-free properties in the
// ArrayData (ArrayLoadData or ArrayStoreData) entry at mdp.
template <class ArrayData> void InterpreterMacroAssembler::profile_array_type(Register mdp,
                                                                              Register array,
                                                                              Register tmp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    mov(tmp, array); // profile a copy so 'array' stays intact for the tests below
    profile_obj_type(tmp, Address(mdp, in_bytes(ArrayData::array_offset())));

    // Set the flat-array flag if this is a flat array.
    Label not_flat;
    test_non_flat_array_oop(array, tmp, not_flat);

    set_mdp_flag_at(mdp, ArrayData::flat_array_byte_constant());

    bind(not_flat);

    // Set the null-free flag if the array cannot hold nulls.
    Label not_null_free;
    test_non_null_free_array_oop(array, tmp, not_null_free);

    set_mdp_flag_at(mdp, ArrayData::null_free_array_byte_constant());

    bind(not_null_free);

    bind(profile_continue);
  }
}
1523
// Explicit instantiations for the two ArrayData flavors used by the interpreter.
template void InterpreterMacroAssembler::profile_array_type<ArrayLoadData>(Register mdp,
                                                                           Register array,
                                                                           Register tmp);
template void InterpreterMacroAssembler::profile_array_type<ArrayStoreData>(Register mdp,
                                                                            Register array,
                                                                            Register tmp);
1530
void InterpreterMacroAssembler::profile_multiple_element_types(Register mdp, Register element, Register tmp, const Register tmp2) {
  // Profile a stored array element: set the null_seen flag when 'element' is
  // null, otherwise record its klass in the receiver-type rows at mdp;
  // finally advance mdp past the ArrayStoreData entry.
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    Label done, update;
    cbnz(element, update);
    set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());
    b(done);

    bind(update);
    load_klass(tmp, element);

    // Record the object type.
    record_klass_in_profile(tmp, mdp, tmp2);

    bind(done);

    // The method data pointer needs to be updated.
    update_mdp_by_constant(mdp, in_bytes(ArrayStoreData::array_store_data_size()));

    bind(profile_continue);
  }
}
1557
1558
// Profile the type of a loaded array element into the element cell of an
// ArrayLoadData record, then advance the mdp past the record.
//
//   mdp     - method data pointer (updated past the record on exit)
//   element - the element oop being profiled (not written here)
//   tmp     - scratch register (clobbered)
void InterpreterMacroAssembler::profile_element_type(Register mdp,
                                                     Register element,
                                                     Register tmp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // profile_obj_type clobbers its register argument, so work on a copy.
    mov(tmp, element);
    profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadData::element_offset())));

    // The method data pointer needs to be updated.
    update_mdp_by_constant(mdp, in_bytes(ArrayLoadData::array_load_data_size()));

    bind(profile_continue);
  }
}
1577
// Profile both operands of an acmp (reference comparison) bytecode into an
// ACmpData record: records each operand's type and, for each side that is an
// inline type, sets the corresponding flag bit.
//
//   mdp         - method data pointer, addressing the ACmpData record
//   left, right - the two operand oops (not written here)
//   tmp         - scratch register (clobbered)
void InterpreterMacroAssembler::profile_acmp(Register mdp,
                                             Register left,
                                             Register right,
                                             Register tmp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Record the left operand's type (profile_obj_type clobbers its register
    // argument, so work on a copy).
    mov(tmp, left);
    profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::left_offset())));

    // Flag the record if the left operand is an inline type.
    Label left_not_inline_type;
    test_oop_is_not_inline_type(left, tmp, left_not_inline_type);
    set_mdp_flag_at(mdp, ACmpData::left_inline_type_byte_constant());
    bind(left_not_inline_type);

    // Record the right operand's type.
    mov(tmp, right);
    profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::right_offset())));

    // Flag the record if the right operand is an inline type.
    Label right_not_inline_type;
    test_oop_is_not_inline_type(right, tmp, right_not_inline_type);
    set_mdp_flag_at(mdp, ACmpData::right_inline_type_byte_constant());
    bind(right_not_inline_type);

    bind(profile_continue);
  }
}
1607
1608 void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
1609 if (state == atos) {
1610 MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
1611 }
1612 }
1613
1614 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }
1615
1616
1617 void InterpreterMacroAssembler::notify_method_entry() {
1618 // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1619 // track stack depth. If it is possible to enter interp_only_mode we add
1620 // the code to check if the event should be sent.
1621 if (JvmtiExport::can_post_interpreter_events()) {
1622 Label L;
1623 ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1624 cbzw(r3, L);
1625 call_VM(noreg, CAST_FROM_FN_PTR(address,
1626 InterpreterRuntime::post_method_entry));
1627 bind(L);
1840 profile_obj_type(tmp, mdo_arg_addr);
1841
1842 int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1843 off_to_args += to_add;
1844 }
1845
1846 if (MethodData::profile_return()) {
1847 ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
1848 sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
1849 }
1850
1851 add(rscratch1, mdp, off_to_args);
1852 bind(done);
1853 mov(mdp, rscratch1);
1854
1855 if (MethodData::profile_return()) {
1856 // We're right after the type profile for the last
1857 // argument. tmp is the number of cells left in the
1858 // CallTypeData/VirtualCallTypeData to reach its end. Non null
1859 // if there's a return to profile.
1860 assert(SingleTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
1861 add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
1862 }
1863 str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
1864 } else {
1865 assert(MethodData::profile_return(), "either profile call args or call ret");
1866 update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
1867 }
1868
1869 // mdp points right after the end of the
1870 // CallTypeData/VirtualCallTypeData, right after the cells for the
1871 // return value type if there's one
1872
1873 bind(profile_continue);
1874 }
1875 }
1876
1877 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
1878 assert_different_registers(mdp, ret, tmp, rbcp);
1879 if (ProfileInterpreter && MethodData::profile_return()) {
1880 Label profile_continue, done;
1886
1887 // If we don't profile all invoke bytecodes we must make sure
1888 // it's a bytecode we indeed profile. We can't go back to the
1889 // beginning of the ProfileData we intend to update to check its
1890 // type because we're right after it and we don't known its
1891 // length
1892 Label do_profile;
1893 ldrb(rscratch1, Address(rbcp, 0));
1894 cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
1895 br(Assembler::EQ, do_profile);
1896 cmp(rscratch1, (u1)Bytecodes::_invokehandle);
1897 br(Assembler::EQ, do_profile);
1898 get_method(tmp);
1899 ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset()));
1900 subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1901 br(Assembler::NE, profile_continue);
1902
1903 bind(do_profile);
1904 }
1905
1906 Address mdo_ret_addr(mdp, -in_bytes(SingleTypeEntry::size()));
1907 mov(tmp, ret);
1908 profile_obj_type(tmp, mdo_ret_addr);
1909
1910 bind(profile_continue);
1911 }
1912 }
1913
1914 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
1915 assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
1916 if (ProfileInterpreter && MethodData::profile_parameters()) {
1917 Label profile_continue, done;
1918
1919 test_method_data_pointer(mdp, profile_continue);
1920
1921 // Load the offset of the area within the MDO used for
1922 // parameters. If it's negative we're not profiling any parameters
1923 ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
1924 tbnz(tmp1, 31, profile_continue); // i.e. sign bit set
1925
1926 // Compute a pointer to the area for parameters from the offset
|