11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "compiler/compiler_globals.hpp"
26 #include "interp_masm_x86.hpp"
27 #include "interpreter/interpreter.hpp"
28 #include "interpreter/interpreterRuntime.hpp"
29 #include "logging/log.hpp"
30 #include "oops/arrayOop.hpp"
31 #include "oops/markWord.hpp"
32 #include "oops/methodData.hpp"
33 #include "oops/method.hpp"
34 #include "oops/resolvedFieldEntry.hpp"
35 #include "oops/resolvedIndyEntry.hpp"
36 #include "oops/resolvedMethodEntry.hpp"
37 #include "prims/jvmtiExport.hpp"
38 #include "prims/jvmtiThreadState.hpp"
39 #include "runtime/basicLock.hpp"
40 #include "runtime/frame.inline.hpp"
41 #include "runtime/javaThread.hpp"
42 #include "runtime/safepointMechanism.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "utilities/powerOfTwo.hpp"
45
46 // Implementation of InterpreterMacroAssembler
47
48 void InterpreterMacroAssembler::jump_to_entry(address entry) {
49 assert(entry, "Entry must have been generated by now");
50 jump(RuntimeAddress(entry));
51 }
52
53 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
148 Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
149 profile_obj_type(tmp, mdo_arg_addr);
150
151 int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
152 addptr(mdp, to_add);
153 off_to_args += to_add;
154 }
155
156 if (MethodData::profile_return()) {
157 movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
158 subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
159 }
160
161 bind(done);
162
163 if (MethodData::profile_return()) {
164 // We're right after the type profile for the last
165 // argument. tmp is the number of cells left in the
166 // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
167 // if there's a return to profile.
168 assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
169 shll(tmp, log2i_exact((int)DataLayout::cell_size));
170 addptr(mdp, tmp);
171 }
172 movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp);
173 } else {
174 assert(MethodData::profile_return(), "either profile call args or call ret");
175 update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
176 }
177
178 // mdp points right after the end of the
179 // CallTypeData/VirtualCallTypeData, right after the cells for the
180 // return value type if there's one
181
182 bind(profile_continue);
183 }
184 }
185
186 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
187 assert_different_registers(mdp, ret, tmp, _bcp_register);
188 if (ProfileInterpreter && MethodData::profile_return()) {
193 if (MethodData::profile_return_jsr292_only()) {
194 assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");
195
196 // If we don't profile all invoke bytecodes we must make sure
197 // it's a bytecode we indeed profile. We can't go back to the
198 // beginning of the ProfileData we intend to update to check its
199 // type because we're right after it and we don't know its
200 // length.
201 Label do_profile;
202 cmpb(Address(_bcp_register, 0), Bytecodes::_invokedynamic);
203 jcc(Assembler::equal, do_profile);
204 cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle);
205 jcc(Assembler::equal, do_profile);
206 get_method(tmp);
207 cmpw(Address(tmp, Method::intrinsic_id_offset()), static_cast<int>(vmIntrinsics::_compiledLambdaForm));
208 jcc(Assembler::notEqual, profile_continue);
209
210 bind(do_profile);
211 }
212
213 Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
214 mov(tmp, ret);
215 profile_obj_type(tmp, mdo_ret_addr);
216
217 bind(profile_continue);
218 }
219 }
220
221 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
222 if (ProfileInterpreter && MethodData::profile_parameters()) {
223 Label profile_continue;
224
225 test_method_data_pointer(mdp, profile_continue);
226
227 // Load the offset of the area within the MDO used for
228 // parameters. If it's negative, we're not profiling any parameters.
229 movl(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
230 testl(tmp1, tmp1);
231 jcc(Assembler::negative, profile_continue);
232
233 // Compute a pointer to the area for parameters from the offset
565 Register cpool,
566 Register index) {
567 assert_different_registers(cpool, index);
568
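// The pointer-sized constant pool slots start immediately after the
// ConstantPool header (hence the sizeof(ConstantPool) displacement). For a
// klass entry the low 16 bits of the slot hold the resolved_klass_index
// (see CPKlassSlot), which the movw below picks up and then uses to index
// the resolved_klasses array.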
569 movw(index, Address(cpool, index, Address::times_ptr, sizeof(ConstantPool)));
570 Register resolved_klasses = cpool;
571 movptr(resolved_klasses, Address(cpool, ConstantPool::resolved_klasses_offset()));
572 movptr(klass, Address(resolved_klasses, index, Address::times_ptr, Array<Klass*>::base_offset_in_bytes()));
573 }
574
575 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
576 // subtype of super_klass.
577 //
578 // Args:
579 // rax: superklass
580 // Rsub_klass: subklass
581 //
582 // Kills:
583 // rcx
584 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
585 Label& ok_is_subtype) {
586 assert(Rsub_klass != rax, "rax holds superklass");
587 assert(Rsub_klass != r14, "r14 holds locals");
588 assert(Rsub_klass != r13, "r13 holds bcp");
589 assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
590
591 // Profile the not-null value's klass.
592 profile_typecheck(rcx, Rsub_klass); // blows rcx
593
594 // Do the check.
595 check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx
596 }
597
598
599 // Java Expression Stack
600
601 void InterpreterMacroAssembler::pop_ptr(Register r) {
602 pop(r);
603 }
604
605 void InterpreterMacroAssembler::push_ptr(Register r) {
606 push(r);
607 }
608
609 void InterpreterMacroAssembler::push_i(Register r) {
610 push(r);
611 }
612
613 void InterpreterMacroAssembler::push_i_or_ptr(Register r) {
866 Label unlocked, unlock, no_unlock;
867
868 #ifdef ASSERT
869 Label not_preempted;
870 cmpptr(Address(r15_thread, JavaThread::preempt_alternate_return_offset()), NULL_WORD);
871 jcc(Assembler::equal, not_preempted);
872 stop("remove_activation: should not have alternate return address set");
873 bind(not_preempted);
874 #endif /* ASSERT */
875
876 const Register rthread = r15_thread;
877 const Register robj = c_rarg1;
878 const Register rmon = c_rarg1;
879
880 // get the value of _do_not_unlock_if_synchronized into rbx
881 const Address do_not_unlock_if_synchronized(rthread,
882 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
883 movbool(rbx, do_not_unlock_if_synchronized);
884 movbool(do_not_unlock_if_synchronized, false); // reset the flag
885
886 // get method access flags
887 movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
888 load_unsigned_short(rcx, Address(rcx, Method::access_flags_offset()));
889 testl(rcx, JVM_ACC_SYNCHRONIZED);
890 jcc(Assembler::zero, unlocked);
891
892 // Don't unlock anything if the _do_not_unlock_if_synchronized flag
893 // is set.
894 testbool(rbx);
895 jcc(Assembler::notZero, no_unlock);
896
897 // unlock monitor
898 push(state); // save result
899
900 // BasicObjectLock will be first in list, since this is a
901 // synchronized method. However, we need to check that the object
902 // has not been unlocked by an explicit monitorexit bytecode.
903 const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
904 wordSize - (int) sizeof(BasicObjectLock));
905 // We use c_rarg1/rdx so that if we take the slow path it will be the
906 // correct register for unlock_object to pass to the VM directly
1005 // the stack, will call InterpreterRuntime::at_unwind.
1006 Label slow_path;
1007 Label fast_path;
1008 safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
1009 jmp(fast_path);
1010 bind(slow_path);
1011 push(state);
1012 set_last_Java_frame(noreg, rbp, (address)pc(), rscratch1);
1013 super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), r15_thread);
1014 reset_last_Java_frame(true);
1015 pop(state);
1016 bind(fast_path);
1017
1018 // JVMTI support. Make sure the safepoint poll test is issued beforehand.
1019 if (notify_jvmdi) {
1020 notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
1021 } else {
1022 notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
1023 }
1024
1025 // remove activation
1026 // get sender sp
1027 movptr(rbx,
1028 Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
1029 if (StackReservedPages > 0) {
1030 // testing if reserved zone needs to be re-enabled
1031 Register rthread = r15_thread;
1032 Label no_reserved_zone_enabling;
1033
1034 // check if already enabled - if so no re-enabling needed
1035 assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
1036 cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_enabled);
1037 jcc(Assembler::equal, no_reserved_zone_enabling);
1038
1039 cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
1040 jcc(Assembler::lessEqual, no_reserved_zone_enabling);
1041
1042 JFR_ONLY(leave_jfr_critical_section();)
1043
1044 call_VM_leaf(
1045 CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
1046 call_VM(noreg, CAST_FROM_FN_PTR(address,
1047 InterpreterRuntime::throw_delayed_StackOverflowError));
1048 should_not_reach_here();
1049
1050 bind(no_reserved_zone_enabling);
1051 }
1052
1053 leave(); // remove frame anchor
1054
1055 JFR_ONLY(leave_jfr_critical_section();)
1056
1057 pop(ret_addr); // get return address
1058 mov(rsp, rbx); // set sp to sender sp
1059 pop_cont_fastpath();
1060
1061 }
1062
1063 #if INCLUDE_JFR
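// These helpers bracket the window in remove_activation() during which the
// interpreter frame is being torn down: the per-thread flag (presumably
// checked by the JFR sampler) tells JFR not to sample or walk this thread's
// stack while the frame is in an inconsistent state.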
1064 void InterpreterMacroAssembler::enter_jfr_critical_section() {
1065 const Address sampling_critical_section(r15_thread, in_bytes(SAMPLING_CRITICAL_SECTION_OFFSET_JFR));
1066 movbool(sampling_critical_section, true);
1067 }
1068
1069 void InterpreterMacroAssembler::leave_jfr_critical_section() {
1070 const Address sampling_critical_section(r15_thread, in_bytes(SAMPLING_CRITICAL_SECTION_OFFSET_JFR));
1071 movbool(sampling_critical_section, false);
1072 }
1073 #endif // INCLUDE_JFR
1074
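// Load the MethodCounters for 'method' into 'mcs', allocating them on first
// use via the runtime; if allocation fails (an OutOfMemoryError is pending),
// the counters stay null and we branch to 'skip'.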
1075 void InterpreterMacroAssembler::get_method_counters(Register method,
1076 Register mcs, Label& skip) {
1077 Label has_counters;
1078 movptr(mcs, Address(method, Method::method_counters_offset()));
1079 testptr(mcs, mcs);
1080 jcc(Assembler::notZero, has_counters);
1081 call_VM(noreg, CAST_FROM_FN_PTR(address,
1082 InterpreterRuntime::build_method_counters), method);
1083 movptr(mcs, Address(method, Method::method_counters_offset()));
1084 testptr(mcs, mcs);
1085 jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
1086 bind(has_counters);
1087 }
1088
1089
1090 // Lock object
1091 //
1092 // Args:
1093 // rdx, c_rarg1: BasicObjectLock to be used for locking
1094 //
1095 // Kills:
1096 // rax, rbx
1097 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
1098 assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
1099
1100 Label done, slow_case;
1101
1102 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1103 const Register tmp_reg = rbx;
1104 const Register obj_reg = c_rarg3; // Will contain the oop
1105
1106 // Load object pointer into obj_reg
1107 movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
1108
1321 }
1322
1323
1324 void InterpreterMacroAssembler::profile_taken_branch(Register mdp) {
1325 if (ProfileInterpreter) {
1326 Label profile_continue;
1327
1328 // If no method data exists, go to profile_continue.
1329 test_method_data_pointer(mdp, profile_continue);
1330
1331 // We are taking a branch. Increment the taken count.
1332 increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
1333
1334 // The method data pointer needs to be updated to reflect the new target.
1335 update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1336 bind(profile_continue);
1337 }
1338 }
1339
1340
1341 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
1342 if (ProfileInterpreter) {
1343 Label profile_continue;
1344
1345 // If no method data exists, go to profile_continue.
1346 test_method_data_pointer(mdp, profile_continue);
1347
1348 // We are not taking a branch. Increment the not taken count.
1349 increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
1350
1351 // The method data pointer needs to be updated to correspond to
1352 // the next bytecode
1353 update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
1354 bind(profile_continue);
1355 }
1356 }
1357
1358 void InterpreterMacroAssembler::profile_call(Register mdp) {
1359 if (ProfileInterpreter) {
1360 Label profile_continue;
1361
1362 // If no method data exists, go to profile_continue.
1363 test_method_data_pointer(mdp, profile_continue);
1364
1365 // We are making a call. Increment the count.
1366 increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1367
1368 // The method data pointer needs to be updated to reflect the new target.
1369 update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1370 bind(profile_continue);
1371 }
1372 }
1373
1536 // case_array_offset_in_bytes()
1537 movl(reg2, in_bytes(MultiBranchData::per_case_size()));
1538 imulptr(index, reg2); // XXX l ?
1539 addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?
1540
1541 // Update the case count
1542 increment_mdp_data_at(mdp,
1543 index,
1544 in_bytes(MultiBranchData::relative_count_offset()));
1545
1546 // The method data pointer needs to be updated.
1547 update_mdp_by_offset(mdp,
1548 index,
1549 in_bytes(MultiBranchData::
1550 relative_displacement_offset()));
1551
1552 bind(profile_continue);
1553 }
1554 }
1555
1556
1557
1558 void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
1559 if (state == atos) {
1560 MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
1561 }
1562 }
1563
1564
1565 // Jump if ((*counter_addr += increment) & mask) == 0
1566 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, Address mask,
1567 Register scratch, Label* where) {
1568 // This update is actually not atomic and can lose a number of updates
1569 // under heavy contention, but the alternative of using the (contended)
1570 // atomic update here penalizes profiling paths too much.
1571 movl(scratch, counter_addr);
1572 incrementl(scratch, InvocationCounter::count_increment);
1573 movl(counter_addr, scratch);
1574 andl(scratch, mask);
1575 if (where != nullptr) {
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "compiler/compiler_globals.hpp"
26 #include "interp_masm_x86.hpp"
27 #include "interpreter/interpreter.hpp"
28 #include "interpreter/interpreterRuntime.hpp"
29 #include "logging/log.hpp"
30 #include "oops/arrayOop.hpp"
31 #include "oops/constMethodFlags.hpp"
32 #include "oops/markWord.hpp"
33 #include "oops/methodData.hpp"
34 #include "oops/method.hpp"
35 #include "oops/inlineKlass.hpp"
36 #include "oops/resolvedFieldEntry.hpp"
37 #include "oops/resolvedIndyEntry.hpp"
38 #include "oops/resolvedMethodEntry.hpp"
39 #include "prims/jvmtiExport.hpp"
40 #include "prims/jvmtiThreadState.hpp"
41 #include "runtime/basicLock.hpp"
42 #include "runtime/frame.inline.hpp"
43 #include "runtime/javaThread.hpp"
44 #include "runtime/safepointMechanism.hpp"
45 #include "runtime/sharedRuntime.hpp"
46 #include "utilities/powerOfTwo.hpp"
47
48 // Implementation of InterpreterMacroAssembler
49
50 void InterpreterMacroAssembler::jump_to_entry(address entry) {
51 assert(entry, "Entry must have been generated by now");
52 jump(RuntimeAddress(entry));
53 }
54
55 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
150 Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
151 profile_obj_type(tmp, mdo_arg_addr);
152
153 int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
154 addptr(mdp, to_add);
155 off_to_args += to_add;
156 }
157
158 if (MethodData::profile_return()) {
159 movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
160 subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
161 }
162
163 bind(done);
164
165 if (MethodData::profile_return()) {
166 // We're right after the type profile for the last
167 // argument. tmp is the number of cells left in the
168 // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
169 // if there's a return to profile.
170 assert(SingleTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
171 shll(tmp, log2i_exact((int)DataLayout::cell_size));
172 addptr(mdp, tmp);
173 }
174 movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp);
175 } else {
176 assert(MethodData::profile_return(), "either profile call args or call ret");
177 update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
178 }
179
180 // mdp points right after the end of the
181 // CallTypeData/VirtualCallTypeData, right after the cells for the
182 // return value type if there's one
183
184 bind(profile_continue);
185 }
186 }
187
188 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
189 assert_different_registers(mdp, ret, tmp, _bcp_register);
190 if (ProfileInterpreter && MethodData::profile_return()) {
195 if (MethodData::profile_return_jsr292_only()) {
196 assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");
197
198 // If we don't profile all invoke bytecodes we must make sure
199 // it's a bytecode we indeed profile. We can't go back to the
200 // beginning of the ProfileData we intend to update to check its
201 // type because we're right after it and we don't know its
202 // length.
203 Label do_profile;
204 cmpb(Address(_bcp_register, 0), Bytecodes::_invokedynamic);
205 jcc(Assembler::equal, do_profile);
206 cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle);
207 jcc(Assembler::equal, do_profile);
208 get_method(tmp);
209 cmpw(Address(tmp, Method::intrinsic_id_offset()), static_cast<int>(vmIntrinsics::_compiledLambdaForm));
210 jcc(Assembler::notEqual, profile_continue);
211
212 bind(do_profile);
213 }
214
215 Address mdo_ret_addr(mdp, -in_bytes(SingleTypeEntry::size()));
216 mov(tmp, ret);
217 profile_obj_type(tmp, mdo_ret_addr);
218
219 bind(profile_continue);
220 }
221 }
222
223 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
224 if (ProfileInterpreter && MethodData::profile_parameters()) {
225 Label profile_continue;
226
227 test_method_data_pointer(mdp, profile_continue);
228
229 // Load the offset of the area within the MDO used for
230 // parameters. If it's negative, we're not profiling any parameters.
231 movl(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
232 testl(tmp1, tmp1);
233 jcc(Assembler::negative, profile_continue);
234
235 // Compute a pointer to the area for parameters from the offset
567 Register cpool,
568 Register index) {
569 assert_different_registers(cpool, index);
570
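// The pointer-sized constant pool slots start immediately after the
// ConstantPool header (hence the sizeof(ConstantPool) displacement). For a
// klass entry the low 16 bits of the slot hold the resolved_klass_index
// (see CPKlassSlot), which the movw below picks up and then uses to index
// the resolved_klasses array.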
571 movw(index, Address(cpool, index, Address::times_ptr, sizeof(ConstantPool)));
572 Register resolved_klasses = cpool;
573 movptr(resolved_klasses, Address(cpool, ConstantPool::resolved_klasses_offset()));
574 movptr(klass, Address(resolved_klasses, index, Address::times_ptr, Array<Klass*>::base_offset_in_bytes()));
575 }
576
577 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
578 // subtype of super_klass.
579 //
580 // Args:
581 // rax: superklass
582 // Rsub_klass: subklass
583 //
584 // Kills:
585 // rcx
586 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
587 Label& ok_is_subtype,
588 bool profile) {
589 assert(Rsub_klass != rax, "rax holds superklass");
590 assert(Rsub_klass != r14, "r14 holds locals");
591 assert(Rsub_klass != r13, "r13 holds bcp");
592 assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
593
594 // Profile the not-null value's klass.
595 if (profile) {
596 profile_typecheck(rcx, Rsub_klass); // blows rcx
597 }
598 // Do the check.
599 check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx
600 }
601
602
603 // Java Expression Stack
604
605 void InterpreterMacroAssembler::pop_ptr(Register r) {
606 pop(r);
607 }
608
609 void InterpreterMacroAssembler::push_ptr(Register r) {
610 push(r);
611 }
612
613 void InterpreterMacroAssembler::push_i(Register r) {
614 push(r);
615 }
616
617 void InterpreterMacroAssembler::push_i_or_ptr(Register r) {
870 Label unlocked, unlock, no_unlock;
871
872 #ifdef ASSERT
873 Label not_preempted;
874 cmpptr(Address(r15_thread, JavaThread::preempt_alternate_return_offset()), NULL_WORD);
875 jcc(Assembler::equal, not_preempted);
876 stop("remove_activation: should not have alternate return address set");
877 bind(not_preempted);
878 #endif /* ASSERT */
879
880 const Register rthread = r15_thread;
881 const Register robj = c_rarg1;
882 const Register rmon = c_rarg1;
883
884 // get the value of _do_not_unlock_if_synchronized into rbx
885 const Address do_not_unlock_if_synchronized(rthread,
886 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
887 movbool(rbx, do_not_unlock_if_synchronized);
888 movbool(do_not_unlock_if_synchronized, false); // reset the flag
889
890 // get method access flags
891 movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
892 load_unsigned_short(rcx, Address(rcx, Method::access_flags_offset()));
893 testl(rcx, JVM_ACC_SYNCHRONIZED);
894 jcc(Assembler::zero, unlocked);
895
896 // Don't unlock anything if the _do_not_unlock_if_synchronized flag
897 // is set.
898 testbool(rbx);
899 jcc(Assembler::notZero, no_unlock);
900
901 // unlock monitor
902 push(state); // save result
903
904 // BasicObjectLock will be first in list, since this is a
905 // synchronized method. However, we need to check that the object
906 // has not been unlocked by an explicit monitorexit bytecode.
907 const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
908 wordSize - (int) sizeof(BasicObjectLock));
909 // We use c_rarg1/rdx so that if we take the slow path it will be the
910 // correct register for unlock_object to pass to the VM directly
1009 // the stack, will call InterpreterRuntime::at_unwind.
1010 Label slow_path;
1011 Label fast_path;
1012 safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
1013 jmp(fast_path);
1014 bind(slow_path);
1015 push(state);
1016 set_last_Java_frame(noreg, rbp, (address)pc(), rscratch1);
1017 super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), r15_thread);
1018 reset_last_Java_frame(true);
1019 pop(state);
1020 bind(fast_path);
1021
1022 // JVMTI support. Make sure the safepoint poll test is issued beforehand.
1023 if (notify_jvmdi) {
1024 notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
1025 } else {
1026 notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
1027 }
1028
1029 if (StackReservedPages > 0) {
1030 movptr(rbx,
1031 Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
1032 // testing if reserved zone needs to be re-enabled
1033 Register rthread = r15_thread;
1034 Label no_reserved_zone_enabling;
1035
1036 // check if already enabled - if so no re-enabling needed
1037 assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
1038 cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_enabled);
1039 jcc(Assembler::equal, no_reserved_zone_enabling);
1040
1041 cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
1042 jcc(Assembler::lessEqual, no_reserved_zone_enabling);
1043
1044 JFR_ONLY(leave_jfr_critical_section();)
1045
1046 call_VM_leaf(
1047 CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
1048 call_VM(noreg, CAST_FROM_FN_PTR(address,
1049 InterpreterRuntime::throw_delayed_StackOverflowError));
1050 should_not_reach_here();
1051
1052 bind(no_reserved_zone_enabling);
1053 }
1054
1055 // remove activation
1056 // get sender sp
1057 movptr(rbx,
1058 Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
1059
1060 if (state == atos && InlineTypeReturnedAsFields) {
1061 Label skip;
1062 Label not_null;
1063 testptr(rax, rax);
1064 jcc(Assembler::notZero, not_null);
1065 // Returned value is null: zero all return registers, since they may hold oop fields
1066 xorq(j_rarg1, j_rarg1);
1067 xorq(j_rarg2, j_rarg2);
1068 xorq(j_rarg3, j_rarg3);
1069 xorq(j_rarg4, j_rarg4);
1070 xorq(j_rarg5, j_rarg5);
1071 jmp(skip);
1072 bind(not_null);
1073
1074 // Check if we are returning a non-null inline type and load its fields into registers
1075 test_oop_is_not_inline_type(rax, rscratch1, skip, /* can_be_null= */ false);
1076
1077 #ifndef _LP64
1078 super_call_VM_leaf(StubRoutines::load_inline_type_fields_in_regs());
1079 #else
1080 // Load fields from a buffered value with an inline class specific handler
1081 load_klass(rdi, rax, rscratch1);
1082 movptr(rdi, Address(rdi, InlineKlass::adr_members_offset()));
1083 movptr(rdi, Address(rdi, InlineKlass::unpack_handler_offset()));
1084 // The unpack handler can be null if the inline type is not scalarizable in returns
1085 testptr(rdi, rdi);
1086 jcc(Assembler::zero, skip);
1087 call(rdi);
1088 #endif
1089 #ifdef ASSERT
1090 // TODO 8284443 Enable
1091 if (StressCallingConvention && false) {
1092 Label skip_stress;
1093 movptr(rscratch1, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
1094 movl(rscratch1, Address(rscratch1, Method::flags_offset()));
1095 testl(rscratch1, MethodFlags::has_scalarized_return_flag()); // test the flags just loaded
1096 jcc(Assembler::zero, skip_stress);
1097 load_klass(rax, rax, rscratch1);
1098 orptr(rax, 1);
1099 bind(skip_stress);
1100 }
1101 #endif
1102 // call above kills the value in rbx. Reload it.
1103 movptr(rbx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
1104 bind(skip);
1105 }
1106
1107 leave(); // remove frame anchor
1108
1109 JFR_ONLY(leave_jfr_critical_section();)
1110
1111 pop(ret_addr); // get return address
1112 mov(rsp, rbx); // set sp to sender sp
1113 pop_cont_fastpath();
1114
1115 }
1116
1117 #if INCLUDE_JFR
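// These helpers bracket the window in remove_activation() during which the
// interpreter frame is being torn down: the per-thread flag (presumably
// checked by the JFR sampler) tells JFR not to sample or walk this thread's
// stack while the frame is in an inconsistent state.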
1118 void InterpreterMacroAssembler::enter_jfr_critical_section() {
1119 const Address sampling_critical_section(r15_thread, in_bytes(SAMPLING_CRITICAL_SECTION_OFFSET_JFR));
1120 movbool(sampling_critical_section, true);
1121 }
1122
1123 void InterpreterMacroAssembler::leave_jfr_critical_section() {
1124 const Address sampling_critical_section(r15_thread, in_bytes(SAMPLING_CRITICAL_SECTION_OFFSET_JFR));
1125 movbool(sampling_critical_section, false);
1126 }
1127 #endif // INCLUDE_JFR
1128
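// Load the MethodCounters for 'method' into 'mcs', allocating them on first
// use via the runtime; if allocation fails (an OutOfMemoryError is pending),
// the counters stay null and we branch to 'skip'.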
1129 void InterpreterMacroAssembler::get_method_counters(Register method,
1130 Register mcs, Label& skip) {
1131 Label has_counters;
1132 movptr(mcs, Address(method, Method::method_counters_offset()));
1133 testptr(mcs, mcs);
1134 jcc(Assembler::notZero, has_counters);
1135 call_VM(noreg, CAST_FROM_FN_PTR(address,
1136 InterpreterRuntime::build_method_counters), method);
1137 movptr(mcs, Address(method, Method::method_counters_offset()));
1138 testptr(mcs, mcs);
1139 jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
1140 bind(has_counters);
1141 }
1142
1143 void InterpreterMacroAssembler::allocate_instance(Register klass, Register new_obj,
1144 Register t1, Register t2,
1145 bool clear_fields, Label& alloc_failed) {
1146 MacroAssembler::allocate_instance(klass, new_obj, t1, t2, clear_fields, alloc_failed);
1147 if (DTraceAllocProbes) {
1148 // Trigger dtrace event for fastpath
1149 push(atos);
1150 call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), new_obj);
1151 pop(atos);
1152 }
1153 }
1154
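// Read the flat field described by the ResolvedFieldEntry in 'entry' out of
// 'obj'. The runtime materializes the field value as a (heap-buffered)
// instance; the resulting oop comes back via the thread's vm_result.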
1155 void InterpreterMacroAssembler::read_flat_field(Register entry, Register obj) {
1156 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flat_field),
1157 obj, entry);
1158 get_vm_result_oop(obj);
1159 }
1160
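// Store the inline-type 'value' into a flat field of 'obj'. Fast path: for a
// null-free inline-type field, copy the value's payload directly into the
// holder using the field's InlineLayoutInfo. Everything else (including the
// null-handling question tracked by the FIXME below) goes to the runtime.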
1161 void InterpreterMacroAssembler::write_flat_field(Register entry, Register tmp1, Register tmp2,
1162 Register obj, Register off, Register value) {
1163 assert_different_registers(entry, tmp1, tmp2, obj, off, value);
1164
1165 Label slow_path, done;
1166
1167 load_unsigned_byte(tmp2, Address(entry, in_bytes(ResolvedFieldEntry::flags_offset())));
1168 test_field_is_not_null_free_inline_type(tmp2, tmp1, slow_path);
1169
1170 null_check(value); // FIXME JDK-8341120
1171
1172 lea(obj, Address(obj, off, Address::times_1));
1173
1174 load_klass(tmp2, value, tmp1);
1175 payload_addr(value, value, tmp2);
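  // At this point 'obj' holds the destination address of the flat field and
  // 'value' points at the source payload inside the buffered object.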
1176
1177 Register idx = tmp1;
1178 load_unsigned_short(idx, Address(entry, in_bytes(ResolvedFieldEntry::field_index_offset())));
1179 movptr(tmp2, Address(entry, in_bytes(ResolvedFieldEntry::field_holder_offset())));
1180
1181 Register layout_info = off;
1182 inline_layout_info(tmp2, idx, layout_info);
1183
1184 flat_field_copy(IN_HEAP, value, obj, layout_info);
1185 jmp(done);
1186
1187 bind(slow_path);
1188 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_flat_field), obj, value, entry);
1189
1190 bind(done);
1191 }
1192
1193 // Lock object
1194 //
1195 // Args:
1196 // rdx, c_rarg1: BasicObjectLock to be used for locking
1197 //
1198 // Kills:
1199 // rax, rbx
1200 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
1201 assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
1202
1203 Label done, slow_case;
1204
1205 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1206 const Register tmp_reg = rbx;
1207 const Register obj_reg = c_rarg3; // Will contain the oop
1208
1209 // Load object pointer into obj_reg
1210 movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
1211
1424 }
1425
1426
1427 void InterpreterMacroAssembler::profile_taken_branch(Register mdp) {
1428 if (ProfileInterpreter) {
1429 Label profile_continue;
1430
1431 // If no method data exists, go to profile_continue.
1432 test_method_data_pointer(mdp, profile_continue);
1433
1434 // We are taking a branch. Increment the taken count.
1435 increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
1436
1437 // The method data pointer needs to be updated to reflect the new target.
1438 update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1439 bind(profile_continue);
1440 }
1441 }
1442
1443
1444 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp, bool acmp) {
1445 if (ProfileInterpreter) {
1446 Label profile_continue;
1447
1448 // If no method data exists, go to profile_continue.
1449 test_method_data_pointer(mdp, profile_continue);
1450
1451 // We are not taking a branch. Increment the not taken count.
1452 increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
1453
1454 // The method data pointer needs to be updated to correspond to
1455 // the next bytecode
1456 update_mdp_by_constant(mdp, acmp ? in_bytes(ACmpData::acmp_data_size()) : in_bytes(BranchData::branch_data_size()));
1457 bind(profile_continue);
1458 }
1459 }
1460
1461 void InterpreterMacroAssembler::profile_call(Register mdp) {
1462 if (ProfileInterpreter) {
1463 Label profile_continue;
1464
1465 // If no method data exists, go to profile_continue.
1466 test_method_data_pointer(mdp, profile_continue);
1467
1468 // We are making a call. Increment the count.
1469 increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1470
1471 // The method data pointer needs to be updated to reflect the new target.
1472 update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1473 bind(profile_continue);
1474 }
1475 }
1476
1639 // case_array_offset_in_bytes()
1640 movl(reg2, in_bytes(MultiBranchData::per_case_size()));
1641 imulptr(index, reg2); // XXX l ?
1642 addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?
1643
1644 // Update the case count
1645 increment_mdp_data_at(mdp,
1646 index,
1647 in_bytes(MultiBranchData::relative_count_offset()));
1648
1649 // The method data pointer needs to be updated.
1650 update_mdp_by_offset(mdp,
1651 index,
1652 in_bytes(MultiBranchData::
1653 relative_displacement_offset()));
1654
1655 bind(profile_continue);
1656 }
1657 }
1658
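// Shared profiling for the array operand of an array load/store site: record
// the array's klass in the ArrayData, and additionally set flags when the
// array is flat and/or null-free.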
1659 template <class ArrayData> void InterpreterMacroAssembler::profile_array_type(Register mdp,
1660 Register array,
1661 Register tmp) {
1662 if (ProfileInterpreter) {
1663 Label profile_continue;
1664
1665 // If no method data exists, go to profile_continue.
1666 test_method_data_pointer(mdp, profile_continue);
1667
1668 mov(tmp, array);
1669 profile_obj_type(tmp, Address(mdp, in_bytes(ArrayData::array_offset())));
1670
1671 Label not_flat;
1672 test_non_flat_array_oop(array, tmp, not_flat);
1673
1674 set_mdp_flag_at(mdp, ArrayData::flat_array_byte_constant());
1675
1676 bind(not_flat);
1677
1678 Label not_null_free;
1679 test_non_null_free_array_oop(array, tmp, not_null_free);
1680
1681 set_mdp_flag_at(mdp, ArrayData::null_free_array_byte_constant());
1682
1683 bind(not_null_free);
1684
1685 bind(profile_continue);
1686 }
1687 }
1688
1689 template void InterpreterMacroAssembler::profile_array_type<ArrayLoadData>(Register mdp,
1690 Register array,
1691 Register tmp);
1692 template void InterpreterMacroAssembler::profile_array_type<ArrayStoreData>(Register mdp,
1693 Register array,
1694 Register tmp);
1695
1696
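// Profile the stored element for an array-store site: a null element only
// sets the null_seen flag, otherwise the element's klass is recorded the
// same way receiver types are.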
1697 void InterpreterMacroAssembler::profile_multiple_element_types(Register mdp, Register element, Register tmp, const Register tmp2) {
1698 if (ProfileInterpreter) {
1699 Label profile_continue;
1700
1701 // If no method data exists, go to profile_continue.
1702 test_method_data_pointer(mdp, profile_continue);
1703
1704 Label done, update;
1705 testptr(element, element);
1706 jccb(Assembler::notZero, update);
1707 set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());
1708 jmp(done);
1709
1710 bind(update);
1711 load_klass(tmp, element, rscratch1);
1712
1713 // Record the object type.
1714 profile_receiver_type(tmp, mdp, 0);
1715
1716 bind(done);
1717
1718 // The method data pointer needs to be updated.
1719 update_mdp_by_constant(mdp, in_bytes(ArrayStoreData::array_store_data_size()));
1720
1721 bind(profile_continue);
1722 }
1723 }
1724
1725 void InterpreterMacroAssembler::profile_element_type(Register mdp,
1726 Register element,
1727 Register tmp) {
1728 if (ProfileInterpreter) {
1729 Label profile_continue;
1730
1731 // If no method data exists, go to profile_continue.
1732 test_method_data_pointer(mdp, profile_continue);
1733
1734 mov(tmp, element);
1735 profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadData::element_offset())));
1736
1737 // The method data pointer needs to be updated.
1738 update_mdp_by_constant(mdp, in_bytes(ArrayLoadData::array_load_data_size()));
1739
1740 bind(profile_continue);
1741 }
1742 }
1743
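// Profile both operands of an acmp bytecode: record each operand's klass in
// the ACmpData and flag operands that are inline types (information the JIT
// can use to specialize the substitutability check).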
1744 void InterpreterMacroAssembler::profile_acmp(Register mdp,
1745 Register left,
1746 Register right,
1747 Register tmp) {
1748 if (ProfileInterpreter) {
1749 Label profile_continue;
1750
1751 // If no method data exists, go to profile_continue.
1752 test_method_data_pointer(mdp, profile_continue);
1753
1754 mov(tmp, left);
1755 profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::left_offset())));
1756
1757 Label left_not_inline_type;
1758 test_oop_is_not_inline_type(left, tmp, left_not_inline_type);
1759 set_mdp_flag_at(mdp, ACmpData::left_inline_type_byte_constant());
1760 bind(left_not_inline_type);
1761
1762 mov(tmp, right);
1763 profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::right_offset())));
1764
1765 Label right_not_inline_type;
1766 test_oop_is_not_inline_type(right, tmp, right_not_inline_type);
1767 set_mdp_flag_at(mdp, ACmpData::right_inline_type_byte_constant());
1768 bind(right_not_inline_type);
1769
1770 bind(profile_continue);
1771 }
1772 }
1773
1774
1775 void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
1776 if (state == atos) {
1777 MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
1778 }
1779 }
1780
1781
1782 // Jump if ((*counter_addr += increment) & mask) == 0
1783 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, Address mask,
1784 Register scratch, Label* where) {
1785 // This update is actually not atomic and can lose a number of updates
1786 // under heavy contention, but the alternative of using the (contended)
1787 // atomic update here penalizes profiling paths too much.
1788 movl(scratch, counter_addr);
1789 incrementl(scratch, InvocationCounter::count_increment);
1790 movl(counter_addr, scratch);
1791 andl(scratch, mask);
1792 if (where != nullptr) {
|