11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "compiler/compiler_globals.hpp"
26 #include "interp_masm_x86.hpp"
27 #include "interpreter/interpreter.hpp"
28 #include "interpreter/interpreterRuntime.hpp"
29 #include "logging/log.hpp"
30 #include "oops/arrayOop.hpp"
31 #include "oops/markWord.hpp"
32 #include "oops/methodData.hpp"
33 #include "oops/method.hpp"
34 #include "oops/resolvedFieldEntry.hpp"
35 #include "oops/resolvedIndyEntry.hpp"
36 #include "oops/resolvedMethodEntry.hpp"
37 #include "prims/jvmtiExport.hpp"
38 #include "prims/jvmtiThreadState.hpp"
39 #include "runtime/basicLock.hpp"
40 #include "runtime/frame.inline.hpp"
41 #include "runtime/javaThread.hpp"
42 #include "runtime/safepointMechanism.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "utilities/powerOfTwo.hpp"
45
46 // Implementation of InterpreterMacroAssembler
47
// Emit an unconditional jump to an already-generated interpreter entry stub.
// 'entry' is the absolute code address of the target; it must be non-null,
// i.e. the entry must have been generated before anything jumps to it.
void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  jump(RuntimeAddress(entry));
}
52
53 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
148 Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
149 profile_obj_type(tmp, mdo_arg_addr);
150
151 int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
152 addptr(mdp, to_add);
153 off_to_args += to_add;
154 }
155
156 if (MethodData::profile_return()) {
157 movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
158 subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
159 }
160
161 bind(done);
162
163 if (MethodData::profile_return()) {
164 // We're right after the type profile for the last
165 // argument. tmp is the number of cells left in the
166 // CallTypeData/VirtualCallTypeData to reach its end. Non null
167 // if there's a return to profile.
168 assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
169 shll(tmp, log2i_exact((int)DataLayout::cell_size));
170 addptr(mdp, tmp);
171 }
172 movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp);
173 } else {
174 assert(MethodData::profile_return(), "either profile call args or call ret");
175 update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
176 }
177
178 // mdp points right after the end of the
179 // CallTypeData/VirtualCallTypeData, right after the cells for the
180 // return value type if there's one
181
182 bind(profile_continue);
183 }
184 }
185
186 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
187 assert_different_registers(mdp, ret, tmp, _bcp_register);
188 if (ProfileInterpreter && MethodData::profile_return()) {
193 if (MethodData::profile_return_jsr292_only()) {
194 assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");
195
196 // If we don't profile all invoke bytecodes we must make sure
197 // it's a bytecode we indeed profile. We can't go back to the
198 // beginning of the ProfileData we intend to update to check its
199 // type because we're right after it and we don't known its
200 // length
201 Label do_profile;
202 cmpb(Address(_bcp_register, 0), Bytecodes::_invokedynamic);
203 jcc(Assembler::equal, do_profile);
204 cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle);
205 jcc(Assembler::equal, do_profile);
206 get_method(tmp);
207 cmpw(Address(tmp, Method::intrinsic_id_offset()), static_cast<int>(vmIntrinsics::_compiledLambdaForm));
208 jcc(Assembler::notEqual, profile_continue);
209
210 bind(do_profile);
211 }
212
213 Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
214 mov(tmp, ret);
215 profile_obj_type(tmp, mdo_ret_addr);
216
217 bind(profile_continue);
218 }
219 }
220
221 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
222 if (ProfileInterpreter && MethodData::profile_parameters()) {
223 Label profile_continue;
224
225 test_method_data_pointer(mdp, profile_continue);
226
227 // Load the offset of the area within the MDO used for
228 // parameters. If it's negative we're not profiling any parameters
229 movl(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
230 testl(tmp1, tmp1);
231 jcc(Assembler::negative, profile_continue);
232
233 // Compute a pointer to the area for parameters from the offset
500 Register cpool,
501 Register index) {
502 assert_different_registers(cpool, index);
503
504 movw(index, Address(cpool, index, Address::times_ptr, sizeof(ConstantPool)));
505 Register resolved_klasses = cpool;
506 movptr(resolved_klasses, Address(cpool, ConstantPool::resolved_klasses_offset()));
507 movptr(klass, Address(resolved_klasses, index, Address::times_ptr, Array<Klass*>::base_offset_in_bytes()));
508 }
509
510 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
511 // subtype of super_klass.
512 //
513 // Args:
514 // rax: superklass
515 // Rsub_klass: subklass
516 //
517 // Kills:
518 // rcx, rdi
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype) {
  // rax, rcx and rdi are consumed below; r13/r14 hold interpreter state
  // (bcp/locals), so the subklass must live somewhere else.
  assert(Rsub_klass != rax, "rax holds superklass");
  LP64_ONLY(assert(Rsub_klass != r14, "r14 holds locals");)
  LP64_ONLY(assert(Rsub_klass != r13, "r13 holds bcp");)
  assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
  assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");

  // Profile the not-null value's klass.
  profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi

  // Do the check; falls through on failure, branches to ok_is_subtype on success.
  check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx
}
533
534
535 // Java Expression Stack
536
// Pop a pointer-sized value from the Java expression stack into r.
void InterpreterMacroAssembler::pop_ptr(Register r) {
  pop(r);
}
540
// Push a pointer-sized value from r onto the Java expression stack.
void InterpreterMacroAssembler::push_ptr(Register r) {
  push(r);
}
544
// Push an int value from r onto the Java expression stack (occupies one
// stack-slot-sized word).
void InterpreterMacroAssembler::push_i(Register r) {
  push(r);
}
548
809 // that would normally not be safe to use. Such bad returns into unsafe territory of
810 // the stack, will call InterpreterRuntime::at_unwind.
811 Label slow_path;
812 Label fast_path;
813 safepoint_poll(slow_path, rthread, true /* at_return */, false /* in_nmethod */);
814 jmp(fast_path);
815 bind(slow_path);
816 push(state);
817 set_last_Java_frame(rthread, noreg, rbp, (address)pc(), rscratch1);
818 super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), rthread);
819 reset_last_Java_frame(rthread, true);
820 pop(state);
821 bind(fast_path);
822
823 // get the value of _do_not_unlock_if_synchronized into rdx
824 const Address do_not_unlock_if_synchronized(rthread,
825 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
826 movbool(rbx, do_not_unlock_if_synchronized);
827 movbool(do_not_unlock_if_synchronized, false); // reset the flag
828
829 // get method access flags
830 movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
831 load_unsigned_short(rcx, Address(rcx, Method::access_flags_offset()));
832 testl(rcx, JVM_ACC_SYNCHRONIZED);
833 jcc(Assembler::zero, unlocked);
834
835 // Don't unlock anything if the _do_not_unlock_if_synchronized flag
836 // is set.
837 testbool(rbx);
838 jcc(Assembler::notZero, no_unlock);
839
840 // unlock monitor
841 push(state); // save result
842
843 // BasicObjectLock will be first in list, since this is a
844 // synchronized method. However, need to check that the object has
845 // not been unlocked by an explicit monitorexit bytecode.
846 const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
847 wordSize - (int) sizeof(BasicObjectLock));
848 // We use c_rarg1/rdx so that if we go slow path it will be the correct
849 // register for unlock_object to pass to VM directly
931 bind(loop);
932 // check if current entry is used
933 cmpptr(Address(rmon, BasicObjectLock::obj_offset()), NULL_WORD);
934 jcc(Assembler::notEqual, exception);
935
936 addptr(rmon, entry_size); // otherwise advance to next entry
937 bind(entry);
938 cmpptr(rmon, rbx); // check if bottom reached
939 jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
940 }
941
942 bind(no_unlock);
943
944 // jvmti support
945 if (notify_jvmdi) {
946 notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
947 } else {
948 notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
949 }
950
951 // remove activation
952 // get sender sp
953 movptr(rbx,
954 Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
955 if (StackReservedPages > 0) {
956 // testing if reserved zone needs to be re-enabled
957 Register rthread = r15_thread;
958 Label no_reserved_zone_enabling;
959
960 // check if already enabled - if so no re-enabling needed
961 assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
962 cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_enabled);
963 jcc(Assembler::equal, no_reserved_zone_enabling);
964
965 cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
966 jcc(Assembler::lessEqual, no_reserved_zone_enabling);
967
968 call_VM_leaf(
969 CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
970 call_VM(noreg, CAST_FROM_FN_PTR(address,
971 InterpreterRuntime::throw_delayed_StackOverflowError));
972 should_not_reach_here();
973
974 bind(no_reserved_zone_enabling);
975 }
976 leave(); // remove frame anchor
977 pop(ret_addr); // get return address
978 mov(rsp, rbx); // set sp to sender sp
979 pop_cont_fastpath();
980 }
981
// Load the MethodCounters of 'method' into 'mcs', allocating them through
// the runtime on first use. Branches to 'skip' if the allocation failed
// (OutOfMemory), in which case no counters exist and callers must not
// attempt to update them.
void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::notZero, has_counters);
  // Counters not yet allocated: call into the runtime to build them.
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  // Reload; a null result here means the allocation failed.
  movptr(mcs, Address(method,Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}
995
996
997 // Lock object
998 //
999 // Args:
1000 // rdx, c_rarg1: BasicObjectLock to be used for locking
1001 //
1002 // Kills:
1003 // rax, rbx
1004 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
1005 assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
1006
1007 if (LockingMode == LM_MONITOR) {
1008 call_VM_preemptable(noreg,
1009 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1010 lock_reg);
1011 } else {
1012 Label count_locking, done, slow_case;
1013
1014 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1015 const Register tmp_reg = rbx;
1022 BasicLock::displaced_header_offset_in_bytes();
1023
1024 // Load object pointer into obj_reg
1025 movptr(obj_reg, Address(lock_reg, obj_offset));
1026
1027 if (DiagnoseSyncOnValueBasedClasses != 0) {
1028 load_klass(tmp_reg, obj_reg, rklass_decode_tmp);
1029 testb(Address(tmp_reg, Klass::misc_flags_offset()), KlassFlags::_misc_is_value_based_class);
1030 jcc(Assembler::notZero, slow_case);
1031 }
1032
1033 if (LockingMode == LM_LIGHTWEIGHT) {
1034 const Register thread = r15_thread;
1035 lightweight_lock(lock_reg, obj_reg, swap_reg, thread, tmp_reg, slow_case);
1036 } else if (LockingMode == LM_LEGACY) {
1037 // Load immediate 1 into swap_reg %rax
1038 movl(swap_reg, 1);
1039
1040 // Load (object->mark() | 1) into swap_reg %rax
1041 orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1042
1043 // Save (object->mark() | 1) into BasicLock's displaced header
1044 movptr(Address(lock_reg, mark_offset), swap_reg);
1045
1046 assert(lock_offset == 0,
1047 "displaced header must be first word in BasicObjectLock");
1048
1049 lock();
1050 cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1051 jcc(Assembler::zero, count_locking);
1052
1053 const int zero_bits = 7;
1054
1055 // Fast check for recursive lock.
1056 //
1057 // Can apply the optimization only if this is a stack lock
1058 // allocated in this thread. For efficiency, we can focus on
1059 // recently allocated stack locks (instead of reading the stack
1060 // base and checking whether 'mark' points inside the current
1061 // thread stack):
1373 test_method_data_pointer(mdp, profile_continue);
1374
1375 // We are taking a branch. Increment the taken count.
1376 // We inline increment_mdp_data_at to return bumped_count in a register
1377 //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
1378 Address data(mdp, in_bytes(JumpData::taken_offset()));
1379 movptr(bumped_count, data);
1380 assert(DataLayout::counter_increment == 1,
1381 "flow-free idiom only works with 1");
1382 addptr(bumped_count, DataLayout::counter_increment);
1383 sbbptr(bumped_count, 0);
1384 movptr(data, bumped_count); // Store back out
1385
1386 // The method data pointer needs to be updated to reflect the new target.
1387 update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1388 bind(profile_continue);
1389 }
1390 }
1391
1392
1393 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
1394 if (ProfileInterpreter) {
1395 Label profile_continue;
1396
1397 // If no method data exists, go to profile_continue.
1398 test_method_data_pointer(mdp, profile_continue);
1399
1400 // We are taking a branch. Increment the not taken count.
1401 increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
1402
1403 // The method data pointer needs to be updated to correspond to
1404 // the next bytecode
1405 update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
1406 bind(profile_continue);
1407 }
1408 }
1409
1410 void InterpreterMacroAssembler::profile_call(Register mdp) {
1411 if (ProfileInterpreter) {
1412 Label profile_continue;
1413
1414 // If no method data exists, go to profile_continue.
1415 test_method_data_pointer(mdp, profile_continue);
1416
1417 // We are making a call. Increment the count.
1418 increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1419
1420 // The method data pointer needs to be updated to reflect the new target.
1421 update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1422 bind(profile_continue);
1423 }
1424 }
1425
1448 Register reg2,
1449 bool receiver_can_be_null) {
1450 if (ProfileInterpreter) {
1451 Label profile_continue;
1452
1453 // If no method data exists, go to profile_continue.
1454 test_method_data_pointer(mdp, profile_continue);
1455
1456 Label skip_receiver_profile;
1457 if (receiver_can_be_null) {
1458 Label not_null;
1459 testptr(receiver, receiver);
1460 jccb(Assembler::notZero, not_null);
1461 // We are making a call. Increment the count for null receiver.
1462 increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1463 jmp(skip_receiver_profile);
1464 bind(not_null);
1465 }
1466
1467 // Record the receiver type.
1468 record_klass_in_profile(receiver, mdp, reg2, true);
1469 bind(skip_receiver_profile);
1470
1471 // The method data pointer needs to be updated to reflect the new target.
1472 update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
1473 bind(profile_continue);
1474 }
1475 }
1476
1477 // This routine creates a state machine for updating the multi-row
1478 // type profile at a virtual call site (or other type-sensitive bytecode).
1479 // The machine visits each row (of receiver/count) until the receiver type
1480 // is found, or until it runs out of rows. At the same time, it remembers
1481 // the location of the first empty row. (An empty row records null for its
1482 // receiver, and can be allocated for a newly-observed receiver type.)
1483 // Because there are two degrees of freedom in the state, a simple linear
1484 // search will not work; it must be a decision tree. Hence this helper
1485 // function is recursive, to generate the required tree structured code.
1486 // It's the interpreter, so we are trading off code space for speed.
1487 // See below for example code.
1488 void InterpreterMacroAssembler::record_klass_in_profile_helper(
1489 Register receiver, Register mdp,
1490 Register reg2, int start_row,
1491 Label& done, bool is_virtual_call) {
1492 if (TypeProfileWidth == 0) {
1493 increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1494 } else {
1495 record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
1496 &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset);
1497 }
1498 }
1499
1500 void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp, Register reg2, int start_row,
1501 Label& done, int total_rows,
1502 OffsetFunction item_offset_fn,
1503 OffsetFunction item_count_offset_fn) {
1504 int last_row = total_rows - 1;
1505 assert(start_row <= last_row, "must be work left to do");
1506 // Test this row for both the item and for null.
1507 // Take any of three different outcomes:
1508 // 1. found item => increment count and goto done
1509 // 2. found null => keep looking for case 1, maybe allocate this cell
1510 // 3. found something else => keep looking for cases 1 and 2
1511 // Case 3 is handled by a recursive call.
1575 // // inner copy of decision tree, rooted at row[1]
1576 // if (row[1].rec == rec) { row[1].incr(); goto done; }
1577 // if (row[1].rec != nullptr) {
1578 // // degenerate decision tree, rooted at row[2]
1579 // if (row[2].rec == rec) { row[2].incr(); goto done; }
1580 // if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow
1581 // row[2].init(rec); goto done;
1582 // } else {
1583 // // remember row[1] is empty
1584 // if (row[2].rec == rec) { row[2].incr(); goto done; }
1585 // row[1].init(rec); goto done;
1586 // }
1587 // } else {
1588 // // remember row[0] is empty
1589 // if (row[1].rec == rec) { row[1].incr(); goto done; }
1590 // if (row[2].rec == rec) { row[2].incr(); goto done; }
1591 // row[0].init(rec); goto done;
1592 // }
1593 // done:
1594
// Record 'receiver' in the multi-row receiver-type profile at the current
// mdp position (see the decision-tree comment above). reg2 is a temp;
// 'done' scopes the whole generated tree.
void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register mdp, Register reg2,
                                                        bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);

  bind (done);
}
1605
1606 void InterpreterMacroAssembler::profile_ret(Register return_bci,
1607 Register mdp) {
1608 if (ProfileInterpreter) {
1609 Label profile_continue;
1610 uint row;
1611
1612 // If no method data exists, go to profile_continue.
1613 test_method_data_pointer(mdp, profile_continue);
1614
1615 // Update the total ret count.
1616 increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1617
1618 for (row = 0; row < RetData::row_limit(); row++) {
1619 Label next_test;
1620
1621 // See if return_bci is equal to bci[n]:
1658 update_mdp_by_constant(mdp, mdp_delta);
1659
1660 bind(profile_continue);
1661 }
1662 }
1663
1664
1665 void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
1666 if (ProfileInterpreter) {
1667 Label profile_continue;
1668
1669 // If no method data exists, go to profile_continue.
1670 test_method_data_pointer(mdp, profile_continue);
1671
1672 // The method data pointer needs to be updated.
1673 int mdp_delta = in_bytes(BitData::bit_data_size());
1674 if (TypeProfileCasts) {
1675 mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1676
1677 // Record the object type.
1678 record_klass_in_profile(klass, mdp, reg2, false);
1679 }
1680 update_mdp_by_constant(mdp, mdp_delta);
1681
1682 bind(profile_continue);
1683 }
1684 }
1685
1686
1687 void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
1688 if (ProfileInterpreter) {
1689 Label profile_continue;
1690
1691 // If no method data exists, go to profile_continue.
1692 test_method_data_pointer(mdp, profile_continue);
1693
1694 // Update the default case count
1695 increment_mdp_data_at(mdp,
1696 in_bytes(MultiBranchData::default_count_offset()));
1697
1698 // The method data pointer needs to be updated.
1718 // case_array_offset_in_bytes()
1719 movl(reg2, in_bytes(MultiBranchData::per_case_size()));
1720 imulptr(index, reg2); // XXX l ?
1721 addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?
1722
1723 // Update the case count
1724 increment_mdp_data_at(mdp,
1725 index,
1726 in_bytes(MultiBranchData::relative_count_offset()));
1727
1728 // The method data pointer needs to be updated.
1729 update_mdp_by_offset(mdp,
1730 index,
1731 in_bytes(MultiBranchData::
1732 relative_displacement_offset()));
1733
1734 bind(profile_continue);
1735 }
1736 }
1737
1738
1739
1740 void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
1741 if (state == atos) {
1742 MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
1743 }
1744 }
1745
1746
1747 // Jump if ((*counter_addr += increment) & mask) == 0
1748 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, Address mask,
1749 Register scratch, Label* where) {
1750 // This update is actually not atomic and can lose a number of updates
1751 // under heavy contention, but the alternative of using the (contended)
1752 // atomic update here penalizes profiling paths too much.
1753 movl(scratch, counter_addr);
1754 incrementl(scratch, InvocationCounter::count_increment);
1755 movl(counter_addr, scratch);
1756 andl(scratch, mask);
1757 if (where != nullptr) {
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "compiler/compiler_globals.hpp"
26 #include "interp_masm_x86.hpp"
27 #include "interpreter/interpreter.hpp"
28 #include "interpreter/interpreterRuntime.hpp"
29 #include "logging/log.hpp"
30 #include "oops/arrayOop.hpp"
31 #include "oops/constMethodFlags.hpp"
32 #include "oops/markWord.hpp"
33 #include "oops/methodData.hpp"
34 #include "oops/method.hpp"
35 #include "oops/inlineKlass.hpp"
36 #include "oops/resolvedFieldEntry.hpp"
37 #include "oops/resolvedIndyEntry.hpp"
38 #include "oops/resolvedMethodEntry.hpp"
39 #include "prims/jvmtiExport.hpp"
40 #include "prims/jvmtiThreadState.hpp"
41 #include "runtime/basicLock.hpp"
42 #include "runtime/frame.inline.hpp"
43 #include "runtime/javaThread.hpp"
44 #include "runtime/safepointMechanism.hpp"
45 #include "runtime/sharedRuntime.hpp"
46 #include "utilities/powerOfTwo.hpp"
47
48 // Implementation of InterpreterMacroAssembler
49
// Emit an unconditional jump to an already-generated interpreter entry stub.
// 'entry' is the absolute code address of the target; it must be non-null,
// i.e. the entry must have been generated before anything jumps to it.
void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  jump(RuntimeAddress(entry));
}
54
55 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
150 Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
151 profile_obj_type(tmp, mdo_arg_addr);
152
153 int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
154 addptr(mdp, to_add);
155 off_to_args += to_add;
156 }
157
158 if (MethodData::profile_return()) {
159 movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
160 subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
161 }
162
163 bind(done);
164
165 if (MethodData::profile_return()) {
166 // We're right after the type profile for the last
167 // argument. tmp is the number of cells left in the
168 // CallTypeData/VirtualCallTypeData to reach its end. Non null
169 // if there's a return to profile.
170 assert(SingleTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
171 shll(tmp, log2i_exact((int)DataLayout::cell_size));
172 addptr(mdp, tmp);
173 }
174 movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp);
175 } else {
176 assert(MethodData::profile_return(), "either profile call args or call ret");
177 update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
178 }
179
180 // mdp points right after the end of the
181 // CallTypeData/VirtualCallTypeData, right after the cells for the
182 // return value type if there's one
183
184 bind(profile_continue);
185 }
186 }
187
188 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
189 assert_different_registers(mdp, ret, tmp, _bcp_register);
190 if (ProfileInterpreter && MethodData::profile_return()) {
195 if (MethodData::profile_return_jsr292_only()) {
196 assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");
197
198 // If we don't profile all invoke bytecodes we must make sure
199 // it's a bytecode we indeed profile. We can't go back to the
200 // beginning of the ProfileData we intend to update to check its
201 // type because we're right after it and we don't known its
202 // length
203 Label do_profile;
204 cmpb(Address(_bcp_register, 0), Bytecodes::_invokedynamic);
205 jcc(Assembler::equal, do_profile);
206 cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle);
207 jcc(Assembler::equal, do_profile);
208 get_method(tmp);
209 cmpw(Address(tmp, Method::intrinsic_id_offset()), static_cast<int>(vmIntrinsics::_compiledLambdaForm));
210 jcc(Assembler::notEqual, profile_continue);
211
212 bind(do_profile);
213 }
214
215 Address mdo_ret_addr(mdp, -in_bytes(SingleTypeEntry::size()));
216 mov(tmp, ret);
217 profile_obj_type(tmp, mdo_ret_addr);
218
219 bind(profile_continue);
220 }
221 }
222
223 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
224 if (ProfileInterpreter && MethodData::profile_parameters()) {
225 Label profile_continue;
226
227 test_method_data_pointer(mdp, profile_continue);
228
229 // Load the offset of the area within the MDO used for
230 // parameters. If it's negative we're not profiling any parameters
231 movl(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
232 testl(tmp1, tmp1);
233 jcc(Assembler::negative, profile_continue);
234
235 // Compute a pointer to the area for parameters from the offset
502 Register cpool,
503 Register index) {
504 assert_different_registers(cpool, index);
505
506 movw(index, Address(cpool, index, Address::times_ptr, sizeof(ConstantPool)));
507 Register resolved_klasses = cpool;
508 movptr(resolved_klasses, Address(cpool, ConstantPool::resolved_klasses_offset()));
509 movptr(klass, Address(resolved_klasses, index, Address::times_ptr, Array<Klass*>::base_offset_in_bytes()));
510 }
511
512 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
513 // subtype of super_klass.
514 //
515 // Args:
516 // rax: superklass
517 // Rsub_klass: subklass
518 //
519 // Kills:
520 // rcx, rdi
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype,
                                                  bool profile) {
  // rax, rcx and rdi are consumed below; r13/r14 hold interpreter state
  // (bcp/locals), so the subklass must live somewhere else.
  assert(Rsub_klass != rax, "rax holds superklass");
  LP64_ONLY(assert(Rsub_klass != r14, "r14 holds locals");)
  LP64_ONLY(assert(Rsub_klass != r13, "r13 holds bcp");)
  assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
  assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");

  // Profile the not-null value's klass (callers pass profile == false when
  // this check should not contribute to the type profile).
  if (profile) {
    profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi
  }

  // Do the check; falls through on failure, branches to ok_is_subtype on success.
  check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx
}
538
539
540 // Java Expression Stack
541
// Pop a pointer-sized value from the Java expression stack into r.
void InterpreterMacroAssembler::pop_ptr(Register r) {
  pop(r);
}
545
// Push a pointer-sized value from r onto the Java expression stack.
void InterpreterMacroAssembler::push_ptr(Register r) {
  push(r);
}
549
// Push an int value from r onto the Java expression stack (occupies one
// stack-slot-sized word).
void InterpreterMacroAssembler::push_i(Register r) {
  push(r);
}
553
814 // that would normally not be safe to use. Such bad returns into unsafe territory of
815 // the stack, will call InterpreterRuntime::at_unwind.
816 Label slow_path;
817 Label fast_path;
818 safepoint_poll(slow_path, rthread, true /* at_return */, false /* in_nmethod */);
819 jmp(fast_path);
820 bind(slow_path);
821 push(state);
822 set_last_Java_frame(rthread, noreg, rbp, (address)pc(), rscratch1);
823 super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), rthread);
824 reset_last_Java_frame(rthread, true);
825 pop(state);
826 bind(fast_path);
827
828 // get the value of _do_not_unlock_if_synchronized into rdx
829 const Address do_not_unlock_if_synchronized(rthread,
830 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
831 movbool(rbx, do_not_unlock_if_synchronized);
832 movbool(do_not_unlock_if_synchronized, false); // reset the flag
833
834 // get method access flags
835 movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
836 load_unsigned_short(rcx, Address(rcx, Method::access_flags_offset()));
837 testl(rcx, JVM_ACC_SYNCHRONIZED);
838 jcc(Assembler::zero, unlocked);
839
840 // Don't unlock anything if the _do_not_unlock_if_synchronized flag
841 // is set.
842 testbool(rbx);
843 jcc(Assembler::notZero, no_unlock);
844
845 // unlock monitor
846 push(state); // save result
847
848 // BasicObjectLock will be first in list, since this is a
849 // synchronized method. However, need to check that the object has
850 // not been unlocked by an explicit monitorexit bytecode.
851 const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
852 wordSize - (int) sizeof(BasicObjectLock));
853 // We use c_rarg1/rdx so that if we go slow path it will be the correct
854 // register for unlock_object to pass to VM directly
936 bind(loop);
937 // check if current entry is used
938 cmpptr(Address(rmon, BasicObjectLock::obj_offset()), NULL_WORD);
939 jcc(Assembler::notEqual, exception);
940
941 addptr(rmon, entry_size); // otherwise advance to next entry
942 bind(entry);
943 cmpptr(rmon, rbx); // check if bottom reached
944 jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
945 }
946
947 bind(no_unlock);
948
949 // jvmti support
950 if (notify_jvmdi) {
951 notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
952 } else {
953 notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
954 }
955
956 if (StackReservedPages > 0) {
957 movptr(rbx,
958 Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
959 // testing if reserved zone needs to be re-enabled
960 Register rthread = r15_thread;
961 Label no_reserved_zone_enabling;
962
963 // check if already enabled - if so no re-enabling needed
964 assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
965 cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_enabled);
966 jcc(Assembler::equal, no_reserved_zone_enabling);
967
968 cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
969 jcc(Assembler::lessEqual, no_reserved_zone_enabling);
970
971 call_VM_leaf(
972 CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
973 call_VM(noreg, CAST_FROM_FN_PTR(address,
974 InterpreterRuntime::throw_delayed_StackOverflowError));
975 should_not_reach_here();
976
977 bind(no_reserved_zone_enabling);
978 }
979
980 // remove activation
981 // get sender sp
982 movptr(rbx,
983 Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
984
985 if (state == atos && InlineTypeReturnedAsFields) {
986 // Check if we are returning an non-null inline type and load its fields into registers
987 Label skip;
988 test_oop_is_not_inline_type(rax, rscratch1, skip);
989
990 #ifndef _LP64
991 super_call_VM_leaf(StubRoutines::load_inline_type_fields_in_regs());
992 #else
993 // Load fields from a buffered value with an inline class specific handler
994 load_klass(rdi, rax, rscratch1);
995 movptr(rdi, Address(rdi, InstanceKlass::adr_inlineklass_fixed_block_offset()));
996 movptr(rdi, Address(rdi, InlineKlass::unpack_handler_offset()));
997 // Unpack handler can be null if inline type is not scalarizable in returns
998 testptr(rdi, rdi);
999 jcc(Assembler::zero, skip);
1000 call(rdi);
1001 #endif
1002 #ifdef ASSERT
1003 // TODO 8284443 Enable
1004 if (StressCallingConvention && false) {
1005 Label skip_stress;
1006 movptr(rscratch1, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
1007 movl(rscratch1, Address(rscratch1, Method::flags_offset()));
1008 testl(rcx, MethodFlags::has_scalarized_return_flag());
1009 jcc(Assembler::zero, skip_stress);
1010 load_klass(rax, rax, rscratch1);
1011 orptr(rax, 1);
1012 bind(skip_stress);
1013 }
1014 #endif
1015 // call above kills the value in rbx. Reload it.
1016 movptr(rbx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
1017 bind(skip);
1018 }
1019 leave(); // remove frame anchor
1020 pop(ret_addr); // get return address
1021 mov(rsp, rbx); // set sp to sender sp
1022 pop_cont_fastpath();
1023 }
1024
1025 void InterpreterMacroAssembler::get_method_counters(Register method,
1026 Register mcs, Label& skip) {
1027 Label has_counters;
1028 movptr(mcs, Address(method, Method::method_counters_offset()));
1029 testptr(mcs, mcs);
1030 jcc(Assembler::notZero, has_counters);
1031 call_VM(noreg, CAST_FROM_FN_PTR(address,
1032 InterpreterRuntime::build_method_counters), method);
1033 movptr(mcs, Address(method,Method::method_counters_offset()));
1034 testptr(mcs, mcs);
1035 jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
1036 bind(has_counters);
1037 }
1038
// Allocate an instance of 'klass' into 'new_obj' via the shared
// MacroAssembler fast-path allocator; jumps to 'alloc_failed' when the
// fast path cannot allocate. When DTrace method probes are enabled, fires
// the object-alloc probe, preserving the new oop (atos) across the leaf call.
void InterpreterMacroAssembler::allocate_instance(Register klass, Register new_obj,
                                                  Register t1, Register t2,
                                                  bool clear_fields, Label& alloc_failed) {
  MacroAssembler::allocate_instance(klass, new_obj, t1, t2, clear_fields, alloc_failed);
  if (DTraceMethodProbes) {
    // Trigger dtrace event for fastpath
    push(atos);
    call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), new_obj);
    pop(atos);
  }
}
1050
// Read a flat (inlined) field out of the object in 'obj', described by the
// ResolvedFieldEntry in 'entry'. On exit 'obj' holds a freshly allocated,
// buffered oop containing a copy of the field.
// Fast path: allocate a buffer instance and copy the flat payload into it.
// Slow path (buffer allocation failed): call the runtime, which performs
// the read and returns the buffered oop via the thread's vm_result.
// Kills r8/r9 (used for the InlineLayoutInfo lookup) in addition to the
// temps passed in.
void InterpreterMacroAssembler::read_flat_field(Register entry, Register tmp1, Register tmp2, Register obj) {
  Label alloc_failed, done;
  const Register alloc_temp = LP64_ONLY(rscratch1) NOT_LP64(rsi);
  const Register dst_temp = LP64_ONLY(rscratch2) NOT_LP64(rdi);
  assert_different_registers(obj, entry, tmp1, tmp2, dst_temp, r8, r9);

  // FIXME: code below could be re-written to better use InlineLayoutInfo data structure
  // see aarch64 version

  // Grab the inline field klass
  const Register field_klass = tmp1;
  load_unsigned_short(tmp2, Address(entry, in_bytes(ResolvedFieldEntry::field_index_offset())));
  movptr(tmp1, Address(entry, ResolvedFieldEntry::field_holder_offset()));
  get_inline_type_field_klass(tmp1, tmp2, field_klass);

  // allocate buffer
  push(obj); // push object being read from // FIXME spilling on stack could probably be avoided by using tmp2
  allocate_instance(field_klass, obj, alloc_temp, dst_temp, false, alloc_failed);

  // Have an oop instance buffer, copy into it
  load_unsigned_short(r9, Address(entry, in_bytes(ResolvedFieldEntry::field_index_offset())));
  movptr(r8, Address(entry, in_bytes(ResolvedFieldEntry::field_holder_offset())));
  inline_layout_info(r8, r9, r8); // holder, index, info => InlineLayoutInfo into r8

  // dst_temp := address of the new buffer's payload area
  payload_addr(obj, dst_temp, field_klass);
  pop(alloc_temp); // restore object being read from
  load_sized_value(tmp2, Address(entry, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
  // tmp2 := address of the flat field inside the source object
  lea(tmp2, Address(alloc_temp, tmp2));
  // call_VM_leaf, clobbers a few regs, save restore new obj
  push(obj);
  // access_value_copy(IS_DEST_UNINITIALIZED, tmp2, dst_temp, field_klass);
  flat_field_copy(IS_DEST_UNINITIALIZED, tmp2, dst_temp, r8);
  pop(obj);
  jmp(done);

  bind(alloc_failed);
  pop(obj); // restore source object before calling into the runtime
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flat_field),
          obj, entry);
  get_vm_result(obj, r15_thread);
  bind(done);
}
1093
1094 // Lock object
1095 //
1096 // Args:
1097 // rdx, c_rarg1: BasicObjectLock to be used for locking
1098 //
1099 // Kills:
1100 // rax, rbx
1101 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
1102 assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
1103
1104 if (LockingMode == LM_MONITOR) {
1105 call_VM_preemptable(noreg,
1106 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1107 lock_reg);
1108 } else {
1109 Label count_locking, done, slow_case;
1110
1111 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1112 const Register tmp_reg = rbx;
1119 BasicLock::displaced_header_offset_in_bytes();
1120
1121 // Load object pointer into obj_reg
1122 movptr(obj_reg, Address(lock_reg, obj_offset));
1123
1124 if (DiagnoseSyncOnValueBasedClasses != 0) {
1125 load_klass(tmp_reg, obj_reg, rklass_decode_tmp);
1126 testb(Address(tmp_reg, Klass::misc_flags_offset()), KlassFlags::_misc_is_value_based_class);
1127 jcc(Assembler::notZero, slow_case);
1128 }
1129
1130 if (LockingMode == LM_LIGHTWEIGHT) {
1131 const Register thread = r15_thread;
1132 lightweight_lock(lock_reg, obj_reg, swap_reg, thread, tmp_reg, slow_case);
1133 } else if (LockingMode == LM_LEGACY) {
1134 // Load immediate 1 into swap_reg %rax
1135 movl(swap_reg, 1);
1136
1137 // Load (object->mark() | 1) into swap_reg %rax
1138 orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1139 if (EnableValhalla) {
1140 // Mask inline_type bit such that we go to the slow path if object is an inline type
1141 andptr(swap_reg, ~((int) markWord::inline_type_bit_in_place));
1142 }
1143
1144 // Save (object->mark() | 1) into BasicLock's displaced header
1145 movptr(Address(lock_reg, mark_offset), swap_reg);
1146
1147 assert(lock_offset == 0,
1148 "displaced header must be first word in BasicObjectLock");
1149
1150 lock();
1151 cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1152 jcc(Assembler::zero, count_locking);
1153
1154 const int zero_bits = 7;
1155
1156 // Fast check for recursive lock.
1157 //
1158 // Can apply the optimization only if this is a stack lock
1159 // allocated in this thread. For efficiency, we can focus on
1160 // recently allocated stack locks (instead of reading the stack
1161 // base and checking whether 'mark' points inside the current
1162 // thread stack):
1474 test_method_data_pointer(mdp, profile_continue);
1475
1476 // We are taking a branch. Increment the taken count.
1477 // We inline increment_mdp_data_at to return bumped_count in a register
1478 //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
1479 Address data(mdp, in_bytes(JumpData::taken_offset()));
1480 movptr(bumped_count, data);
1481 assert(DataLayout::counter_increment == 1,
1482 "flow-free idiom only works with 1");
1483 addptr(bumped_count, DataLayout::counter_increment);
1484 sbbptr(bumped_count, 0);
1485 movptr(data, bumped_count); // Store back out
1486
1487 // The method data pointer needs to be updated to reflect the new target.
1488 update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1489 bind(profile_continue);
1490 }
1491 }
1492
1493
// Profile a branch that fell through (was not taken): bump the not-taken
// counter in the current profile cell and advance mdp past it. 'acmp'
// selects the larger ACmpData cell size instead of plain BranchData.
void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp, bool acmp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are not taking a branch. Increment the not taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to
    // the next bytecode
    update_mdp_by_constant(mdp, acmp ? in_bytes(ACmpData::acmp_data_size()): in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}
1510
// Profile a call site: bump the invocation counter in the CounterData cell
// and advance mdp past it. No-op unless ProfileInterpreter is enabled.
void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}
1526
1549 Register reg2,
1550 bool receiver_can_be_null) {
1551 if (ProfileInterpreter) {
1552 Label profile_continue;
1553
1554 // If no method data exists, go to profile_continue.
1555 test_method_data_pointer(mdp, profile_continue);
1556
1557 Label skip_receiver_profile;
1558 if (receiver_can_be_null) {
1559 Label not_null;
1560 testptr(receiver, receiver);
1561 jccb(Assembler::notZero, not_null);
1562 // We are making a call. Increment the count for null receiver.
1563 increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1564 jmp(skip_receiver_profile);
1565 bind(not_null);
1566 }
1567
1568 // Record the receiver type.
1569 record_klass_in_profile(receiver, mdp, reg2);
1570 bind(skip_receiver_profile);
1571
1572 // The method data pointer needs to be updated to reflect the new target.
1573 update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
1574 bind(profile_continue);
1575 }
1576 }
1577
1578 // This routine creates a state machine for updating the multi-row
1579 // type profile at a virtual call site (or other type-sensitive bytecode).
1580 // The machine visits each row (of receiver/count) until the receiver type
1581 // is found, or until it runs out of rows. At the same time, it remembers
1582 // the location of the first empty row. (An empty row records null for its
1583 // receiver, and can be allocated for a newly-observed receiver type.)
1584 // Because there are two degrees of freedom in the state, a simple linear
1585 // search will not work; it must be a decision tree. Hence this helper
1586 // function is recursive, to generate the required tree structured code.
1587 // It's the interpreter, so we are trading off code space for speed.
1588 // See below for example code.
void InterpreterMacroAssembler::record_klass_in_profile_helper(Register receiver, Register mdp,
                                                        Register reg2, int start_row,
                                                        Label& done) {
  if (TypeProfileWidth == 0) {
    // Type rows are disabled: just count the event.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
  } else {
    // NOTE(review): 'start_row' is unused; the walk always starts at row 0
    // (the only caller in this file passes 0) — confirm before relying on it.
    record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
        &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset);
  }
}
1599
1600 void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp, Register reg2, int start_row,
1601 Label& done, int total_rows,
1602 OffsetFunction item_offset_fn,
1603 OffsetFunction item_count_offset_fn) {
1604 int last_row = total_rows - 1;
1605 assert(start_row <= last_row, "must be work left to do");
1606 // Test this row for both the item and for null.
1607 // Take any of three different outcomes:
1608 // 1. found item => increment count and goto done
1609 // 2. found null => keep looking for case 1, maybe allocate this cell
1610 // 3. found something else => keep looking for cases 1 and 2
1611 // Case 3 is handled by a recursive call.
1675 // // inner copy of decision tree, rooted at row[1]
1676 // if (row[1].rec == rec) { row[1].incr(); goto done; }
1677 // if (row[1].rec != nullptr) {
1678 // // degenerate decision tree, rooted at row[2]
1679 // if (row[2].rec == rec) { row[2].incr(); goto done; }
1680 // if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow
1681 // row[2].init(rec); goto done;
1682 // } else {
1683 // // remember row[1] is empty
1684 // if (row[2].rec == rec) { row[2].incr(); goto done; }
1685 // row[1].init(rec); goto done;
1686 // }
1687 // } else {
1688 // // remember row[0] is empty
1689 // if (row[1].rec == rec) { row[1].incr(); goto done; }
1690 // if (row[2].rec == rec) { row[2].incr(); goto done; }
1691 // row[0].init(rec); goto done;
1692 // }
1693 // done:
1694
// Record the klass in 'receiver' into the multi-row receiver-type profile
// at mdp, using 'reg2' as a temp. All rows share the single 'done' exit.
void InterpreterMacroAssembler::record_klass_in_profile(Register receiver, Register mdp, Register reg2) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done);

  bind (done);
}
1703
1704 void InterpreterMacroAssembler::profile_ret(Register return_bci,
1705 Register mdp) {
1706 if (ProfileInterpreter) {
1707 Label profile_continue;
1708 uint row;
1709
1710 // If no method data exists, go to profile_continue.
1711 test_method_data_pointer(mdp, profile_continue);
1712
1713 // Update the total ret count.
1714 increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1715
1716 for (row = 0; row < RetData::row_limit(); row++) {
1717 Label next_test;
1718
1719 // See if return_bci is equal to bci[n]:
1756 update_mdp_by_constant(mdp, mdp_delta);
1757
1758 bind(profile_continue);
1759 }
1760 }
1761
1762
// Profile a type-check bytecode: when TypeProfileCasts is enabled, record
// the checked klass in the receiver-type rows and advance mdp by the larger
// VirtualCallData cell; otherwise advance by the plain BitData cell.
void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(klass, mdp, reg2);
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}
1783
1784
1785 void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
1786 if (ProfileInterpreter) {
1787 Label profile_continue;
1788
1789 // If no method data exists, go to profile_continue.
1790 test_method_data_pointer(mdp, profile_continue);
1791
1792 // Update the default case count
1793 increment_mdp_data_at(mdp,
1794 in_bytes(MultiBranchData::default_count_offset()));
1795
1796 // The method data pointer needs to be updated.
1816 // case_array_offset_in_bytes()
1817 movl(reg2, in_bytes(MultiBranchData::per_case_size()));
1818 imulptr(index, reg2); // XXX l ?
1819 addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?
1820
1821 // Update the case count
1822 increment_mdp_data_at(mdp,
1823 index,
1824 in_bytes(MultiBranchData::relative_count_offset()));
1825
1826 // The method data pointer needs to be updated.
1827 update_mdp_by_offset(mdp,
1828 index,
1829 in_bytes(MultiBranchData::
1830 relative_displacement_offset()));
1831
1832 bind(profile_continue);
1833 }
1834 }
1835
// Profile an array operand: record the array's type in the ArrayData cell
// and set flags when the array is flat or null-free. 'ArrayData' is either
// ArrayLoadData or ArrayStoreData (explicitly instantiated below).
// 'tmp' is clobbered (receives a copy of 'array').
template <class ArrayData> void InterpreterMacroAssembler::profile_array_type(Register mdp,
                                                                              Register array,
                                                                              Register tmp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    mov(tmp, array);
    profile_obj_type(tmp, Address(mdp, in_bytes(ArrayData::array_offset())));

    // Flag flat arrays.
    Label not_flat;
    test_non_flat_array_oop(array, tmp, not_flat);

    set_mdp_flag_at(mdp, ArrayData::flat_array_byte_constant());

    bind(not_flat);

    // Flag null-free arrays.
    Label not_null_free;
    test_non_null_free_array_oop(array, tmp, not_null_free);

    set_mdp_flag_at(mdp, ArrayData::null_free_array_byte_constant());

    bind(not_null_free);

    bind(profile_continue);
  }
}
1865
// Explicit instantiations for the two array profiling data layouts.
template void InterpreterMacroAssembler::profile_array_type<ArrayLoadData>(Register mdp,
                                                                           Register array,
                                                                           Register tmp);
template void InterpreterMacroAssembler::profile_array_type<ArrayStoreData>(Register mdp,
                                                                            Register array,
                                                                            Register tmp);
1872
1873
// Profile a stored array element when it can be one of several types:
// a null element sets the null_seen flag; otherwise the element's klass is
// recorded in the receiver-type rows. Advances mdp past the ArrayStoreData
// cell. Clobbers 'tmp', 'tmp2' and rscratch1.
void InterpreterMacroAssembler::profile_multiple_element_types(Register mdp, Register element, Register tmp, const Register tmp2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    Label done, update;
    testptr(element, element);
    jccb(Assembler::notZero, update);
    // Null element: set the null_seen flag instead of recording a klass.
    set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());
    jmp(done);

    bind(update);
    load_klass(tmp, element, rscratch1);

    // Record the object type.
    record_klass_in_profile(tmp, mdp, tmp2);

    bind(done);

    // The method data pointer needs to be updated.
    update_mdp_by_constant(mdp, in_bytes(ArrayStoreData::array_store_data_size()));

    bind(profile_continue);
  }
}
1901
// Profile a loaded array element: record its type in the ArrayLoadData
// cell and advance mdp past it. Clobbers 'tmp'.
void InterpreterMacroAssembler::profile_element_type(Register mdp,
                                                     Register element,
                                                     Register tmp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    mov(tmp, element);
    profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadData::element_offset())));

    // The method data pointer needs to be updated.
    update_mdp_by_constant(mdp, in_bytes(ArrayLoadData::array_load_data_size()));

    bind(profile_continue);
  }
}
1920
// Profile both operands of an acmp: record the type of each operand in the
// ACmpData cell, and set a per-side flag when an operand is an inline type.
// Does not advance mdp. Clobbers 'tmp'.
void InterpreterMacroAssembler::profile_acmp(Register mdp,
                                             Register left,
                                             Register right,
                                             Register tmp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Record left operand's type, and flag it if it is an inline type.
    mov(tmp, left);
    profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::left_offset())));

    Label left_not_inline_type;
    test_oop_is_not_inline_type(left, tmp, left_not_inline_type);
    set_mdp_flag_at(mdp, ACmpData::left_inline_type_byte_constant());
    bind(left_not_inline_type);

    // Same for the right operand.
    mov(tmp, right);
    profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::right_offset())));

    Label right_not_inline_type;
    test_oop_is_not_inline_type(right, tmp, right_not_inline_type);
    set_mdp_flag_at(mdp, ACmpData::right_inline_type_byte_constant());
    bind(right_not_inline_type);

    bind(profile_continue);
  }
}
1950
1951
1952 void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
1953 if (state == atos) {
1954 MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
1955 }
1956 }
1957
1958
1959 // Jump if ((*counter_addr += increment) & mask) == 0
1960 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, Address mask,
1961 Register scratch, Label* where) {
1962 // This update is actually not atomic and can lose a number of updates
1963 // under heavy contention, but the alternative of using the (contended)
1964 // atomic update here penalizes profiling paths too much.
1965 movl(scratch, counter_addr);
1966 incrementl(scratch, InvocationCounter::count_increment);
1967 movl(counter_addr, scratch);
1968 andl(scratch, mask);
1969 if (where != nullptr) {
|