
src/hotspot/cpu/x86/interp_masm_x86.cpp (old version)

  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "compiler/compiler_globals.hpp"
  26 #include "interp_masm_x86.hpp"
  27 #include "interpreter/interpreter.hpp"
  28 #include "interpreter/interpreterRuntime.hpp"
  29 #include "logging/log.hpp"
  30 #include "oops/arrayOop.hpp"

  31 #include "oops/markWord.hpp"
  32 #include "oops/methodData.hpp"
  33 #include "oops/method.hpp"

  34 #include "oops/resolvedFieldEntry.hpp"
  35 #include "oops/resolvedIndyEntry.hpp"
  36 #include "oops/resolvedMethodEntry.hpp"
  37 #include "prims/jvmtiExport.hpp"
  38 #include "prims/jvmtiThreadState.hpp"
  39 #include "runtime/basicLock.hpp"
  40 #include "runtime/frame.inline.hpp"
  41 #include "runtime/javaThread.hpp"
  42 #include "runtime/safepointMechanism.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "utilities/powerOfTwo.hpp"
  45 
  46 // Implementation of InterpreterMacroAssembler
  47 
  48 void InterpreterMacroAssembler::jump_to_entry(address entry) {
  49   assert(entry, "Entry must have been generated by now");
  50   jump(RuntimeAddress(entry));
  51 }
  52 
  53 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {

 148         Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
 149         profile_obj_type(tmp, mdo_arg_addr);
 150 
 151         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
 152         addptr(mdp, to_add);
 153         off_to_args += to_add;
 154       }
 155 
 156       if (MethodData::profile_return()) {
 157         movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
 158         subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
 159       }
 160 
 161       bind(done);
 162 
 163       if (MethodData::profile_return()) {
 164         // We're right after the type profile for the last
 165         // argument. tmp is the number of cells left in the
 166         // CallTypeData/VirtualCallTypeData to reach its end. Non null
 167         // if there's a return to profile.
 168         assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
 169         shll(tmp, log2i_exact((int)DataLayout::cell_size));
 170         addptr(mdp, tmp);
 171       }
 172       movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp);
 173     } else {
 174       assert(MethodData::profile_return(), "either profile call args or call ret");
 175       update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
 176     }
 177 
 178     // mdp points right after the end of the
 179     // CallTypeData/VirtualCallTypeData, right after the cells for the
 180     // return value type if there's one
 181 
 182     bind(profile_continue);
 183   }
 184 }
 185 
 186 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
 187   assert_different_registers(mdp, ret, tmp, _bcp_register);
 188   if (ProfileInterpreter && MethodData::profile_return()) {

 193     if (MethodData::profile_return_jsr292_only()) {
 194       assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");
 195 
 196       // If we don't profile all invoke bytecodes we must make sure
 197       // it's a bytecode we indeed profile. We can't go back to the
 198       // beginning of the ProfileData we intend to update to check its
 199       // type because we're right after it and we don't know its
 200       // length
 201       Label do_profile;
 202       cmpb(Address(_bcp_register, 0), Bytecodes::_invokedynamic);
 203       jcc(Assembler::equal, do_profile);
 204       cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle);
 205       jcc(Assembler::equal, do_profile);
 206       get_method(tmp);
 207       cmpw(Address(tmp, Method::intrinsic_id_offset()), static_cast<int>(vmIntrinsics::_compiledLambdaForm));
 208       jcc(Assembler::notEqual, profile_continue);
 209 
 210       bind(do_profile);
 211     }
 212 
 213     Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
 214     mov(tmp, ret);
 215     profile_obj_type(tmp, mdo_ret_addr);
 216 
 217     bind(profile_continue);
 218   }
 219 }
 220 
 221 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
 222   if (ProfileInterpreter && MethodData::profile_parameters()) {
 223     Label profile_continue;
 224 
 225     test_method_data_pointer(mdp, profile_continue);
 226 
 227     // Load the offset of the area within the MDO used for
 228     // parameters. If it's negative we're not profiling any parameters
 229     movl(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
 230     testl(tmp1, tmp1);
 231     jcc(Assembler::negative, profile_continue);
 232 
 233     // Compute a pointer to the area for parameters from the offset

 499                                                              Register cpool,
 500                                                              Register index) {
 501   assert_different_registers(cpool, index);
 502 
 503   movw(index, Address(cpool, index, Address::times_ptr, sizeof(ConstantPool)));
 504   Register resolved_klasses = cpool;
 505   movptr(resolved_klasses, Address(cpool, ConstantPool::resolved_klasses_offset()));
 506   movptr(klass, Address(resolved_klasses, index, Address::times_ptr, Array<Klass*>::base_offset_in_bytes()));
 507 }
 508 
 509 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
 510 // subtype of super_klass.
 511 //
 512 // Args:
 513 //      rax: superklass
 514 //      Rsub_klass: subklass
 515 //
 516 // Kills:
 517 //      rcx, rdi
 518 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
 519                                                   Label& ok_is_subtype) {
 520   assert(Rsub_klass != rax, "rax holds superklass");
 521   LP64_ONLY(assert(Rsub_klass != r14, "r14 holds locals");)
 522   LP64_ONLY(assert(Rsub_klass != r13, "r13 holds bcp");)
 523   assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
 524   assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");
 525 
 526   // Profile the not-null value's klass.
 527   profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi
 528 
 529   // Do the check.
 530   check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx
 531 }
 532 
 533 
 534 // Java Expression Stack
 535 
 536 void InterpreterMacroAssembler::pop_ptr(Register r) {
 537   pop(r);
 538 }
 539 
 540 void InterpreterMacroAssembler::push_ptr(Register r) {
 541   push(r);
 542 }
 543 
 544 void InterpreterMacroAssembler::push_i(Register r) {
 545   push(r);
 546 }
 547 

 793 //       no error processing
 794 void InterpreterMacroAssembler::remove_activation(TosState state,
 795                                                   Register ret_addr,
 796                                                   bool throw_monitor_exception,
 797                                                   bool install_monitor_exception,
 798                                                   bool notify_jvmdi) {
 799   // Note: Registers rdx xmm0 may be in use for the
 800   // result check if synchronized method
 801   Label unlocked, unlock, no_unlock;
 802 
 803   const Register rthread = r15_thread;
 804   const Register robj    = c_rarg1;
 805   const Register rmon    = c_rarg1;
 806 
 807   // get the value of _do_not_unlock_if_synchronized into rdx
 808   const Address do_not_unlock_if_synchronized(rthread,
 809     in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
 810   movbool(rbx, do_not_unlock_if_synchronized);
 811   movbool(do_not_unlock_if_synchronized, false); // reset the flag
 812 
 813  // get method access flags
 814   movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
 815   load_unsigned_short(rcx, Address(rcx, Method::access_flags_offset()));
 816   testl(rcx, JVM_ACC_SYNCHRONIZED);
 817   jcc(Assembler::zero, unlocked);
 818 
 819   // Don't unlock anything if the _do_not_unlock_if_synchronized flag
 820   // is set.
 821   testbool(rbx);
 822   jcc(Assembler::notZero, no_unlock);
 823 
 824   // unlock monitor
 825   push(state); // save result
 826 
 827   // BasicObjectLock will be first in list, since this is a
 828   // synchronized method. However, need to check that the object has
 829   // not been unlocked by an explicit monitorexit bytecode.
 830   const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
 831                         wordSize - (int) sizeof(BasicObjectLock));
 832   // We use c_rarg1/rdx so that if we go slow path it will be the correct
 833   // register for unlock_object to pass to VM directly

 932   // the stack, will call InterpreterRuntime::at_unwind.
 933   Label slow_path;
 934   Label fast_path;
 935   safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
 936   jmp(fast_path);
 937   bind(slow_path);
 938   push(state);
 939   set_last_Java_frame(noreg, rbp, (address)pc(), rscratch1);
 940   super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), r15_thread);
 941   reset_last_Java_frame(true);
 942   pop(state);
 943   bind(fast_path);
 944 
 945   // JVMTI support. Make sure the safepoint poll test is issued prior.
 946   if (notify_jvmdi) {
 947     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
 948   } else {
 949     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
 950   }
 951 
 952   // remove activation
 953   // get sender sp
 954   movptr(rbx,
 955          Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
 956   if (StackReservedPages > 0) {
 957     // testing if reserved zone needs to be re-enabled
 958     Register rthread = r15_thread;
 959     Label no_reserved_zone_enabling;
 960 
 961     // check if already enabled - if so no re-enabling needed
 962     assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
 963     cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_enabled);
 964     jcc(Assembler::equal, no_reserved_zone_enabling);
 965 
 966     cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
 967     jcc(Assembler::lessEqual, no_reserved_zone_enabling);
 968 
 969     JFR_ONLY(leave_jfr_critical_section();)
 970 
 971     call_VM_leaf(
 972       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
 973     call_VM(noreg, CAST_FROM_FN_PTR(address,
 974                    InterpreterRuntime::throw_delayed_StackOverflowError));
 975     should_not_reach_here();
 976 
 977     bind(no_reserved_zone_enabling);
 978   }
 979 
 980   leave();                           // remove frame anchor
 981 
 982   JFR_ONLY(leave_jfr_critical_section();)
 983 
 984   pop(ret_addr);                     // get return address
 985   mov(rsp, rbx);                     // set sp to sender sp
 986   pop_cont_fastpath();
 987 
 988 }
 989 
 990 #if INCLUDE_JFR
 991 void InterpreterMacroAssembler::enter_jfr_critical_section() {
 992   const Address sampling_critical_section(r15_thread, in_bytes(SAMPLING_CRITICAL_SECTION_OFFSET_JFR));
 993   movbool(sampling_critical_section, true);
 994 }
 995 
 996 void InterpreterMacroAssembler::leave_jfr_critical_section() {
 997   const Address sampling_critical_section(r15_thread, in_bytes(SAMPLING_CRITICAL_SECTION_OFFSET_JFR));
 998   movbool(sampling_critical_section, false);
 999 }
1000 #endif // INCLUDE_JFR
1001 
1002 void InterpreterMacroAssembler::get_method_counters(Register method,
1003                                                     Register mcs, Label& skip) {
1004   Label has_counters;
1005   movptr(mcs, Address(method, Method::method_counters_offset()));
1006   testptr(mcs, mcs);
1007   jcc(Assembler::notZero, has_counters);
1008   call_VM(noreg, CAST_FROM_FN_PTR(address,
1009           InterpreterRuntime::build_method_counters), method);
1010   movptr(mcs, Address(method,Method::method_counters_offset()));
1011   testptr(mcs, mcs);
1012   jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
1013   bind(has_counters);
1014 }
1015 
1016 
1017 // Lock object
1018 //
1019 // Args:
1020 //      rdx, c_rarg1: BasicObjectLock to be used for locking
1021 //
1022 // Kills:
1023 //      rax, rbx
1024 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
1025   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
1026 
1027   if (LockingMode == LM_MONITOR) {
1028     call_VM_preemptable(noreg,
1029             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1030             lock_reg);
1031   } else {
1032     Label count_locking, done, slow_case;
1033 
1034     const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1035     const Register tmp_reg = rbx;

1041     const int mark_offset = lock_offset +
1042                             BasicLock::displaced_header_offset_in_bytes();
1043 
1044     // Load object pointer into obj_reg
1045     movptr(obj_reg, Address(lock_reg, obj_offset));
1046 
1047     if (LockingMode == LM_LIGHTWEIGHT) {
1048       lightweight_lock(lock_reg, obj_reg, swap_reg, tmp_reg, slow_case);
1049     } else if (LockingMode == LM_LEGACY) {
1050       if (DiagnoseSyncOnValueBasedClasses != 0) {
1051         load_klass(tmp_reg, obj_reg, rklass_decode_tmp);
1052         testb(Address(tmp_reg, Klass::misc_flags_offset()), KlassFlags::_misc_is_value_based_class);
1053         jcc(Assembler::notZero, slow_case);
1054       }
1055 
1056       // Load immediate 1 into swap_reg %rax
1057       movl(swap_reg, 1);
1058 
1059       // Load (object->mark() | 1) into swap_reg %rax
1060       orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1061 
1062       // Save (object->mark() | 1) into BasicLock's displaced header
1063       movptr(Address(lock_reg, mark_offset), swap_reg);
1064 
1065       assert(lock_offset == 0,
1066              "displaced header must be first word in BasicObjectLock");
1067 
1068       lock();
1069       cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1070       jcc(Assembler::zero, count_locking);
1071 
1072       const int zero_bits = 7;
1073 
1074       // Fast check for recursive lock.
1075       //
1076       // Can apply the optimization only if this is a stack lock
1077       // allocated in this thread. For efficiency, we can focus on
1078       // recently allocated stack locks (instead of reading the stack
1079       // base and checking whether 'mark' points inside the current
1080       // thread stack):

1355 }
1356 
1357 
1358 void InterpreterMacroAssembler::profile_taken_branch(Register mdp) {
1359   if (ProfileInterpreter) {
1360     Label profile_continue;
1361 
1362     // If no method data exists, go to profile_continue.
1363     test_method_data_pointer(mdp, profile_continue);
1364 
1365     // We are taking a branch.  Increment the taken count.
1366     increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
1367 
1368     // The method data pointer needs to be updated to reflect the new target.
1369     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1370     bind(profile_continue);
1371   }
1372 }
1373 
1374 
1375 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
1376   if (ProfileInterpreter) {
1377     Label profile_continue;
1378 
1379     // If no method data exists, go to profile_continue.
1380     test_method_data_pointer(mdp, profile_continue);
1381 
1382     // We are not taking a branch.  Increment the not taken count.
1383     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
1384 
1385     // The method data pointer needs to be updated to correspond to
1386     // the next bytecode
1387     update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
1388     bind(profile_continue);
1389   }
1390 }
1391 
1392 void InterpreterMacroAssembler::profile_call(Register mdp) {
1393   if (ProfileInterpreter) {
1394     Label profile_continue;
1395 
1396     // If no method data exists, go to profile_continue.
1397     test_method_data_pointer(mdp, profile_continue);
1398 
1399     // We are making a call.  Increment the count.
1400     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1401 
1402     // The method data pointer needs to be updated to reflect the new target.
1403     update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1404     bind(profile_continue);
1405   }
1406 }
1407 

1430                                                      Register reg2,
1431                                                      bool receiver_can_be_null) {
1432   if (ProfileInterpreter) {
1433     Label profile_continue;
1434 
1435     // If no method data exists, go to profile_continue.
1436     test_method_data_pointer(mdp, profile_continue);
1437 
1438     Label skip_receiver_profile;
1439     if (receiver_can_be_null) {
1440       Label not_null;
1441       testptr(receiver, receiver);
1442       jccb(Assembler::notZero, not_null);
1443       // We are making a call.  Increment the count for null receiver.
1444       increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1445       jmp(skip_receiver_profile);
1446       bind(not_null);
1447     }
1448 
1449     // Record the receiver type.
1450     record_klass_in_profile(receiver, mdp, reg2, true);
1451     bind(skip_receiver_profile);
1452 
1453     // The method data pointer needs to be updated to reflect the new target.
1454     update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
1455     bind(profile_continue);
1456   }
1457 }
1458 
1459 // This routine creates a state machine for updating the multi-row
1460 // type profile at a virtual call site (or other type-sensitive bytecode).
1461 // The machine visits each row (of receiver/count) until the receiver type
1462 // is found, or until it runs out of rows.  At the same time, it remembers
1463 // the location of the first empty row.  (An empty row records null for its
1464 // receiver, and can be allocated for a newly-observed receiver type.)
1465 // Because there are two degrees of freedom in the state, a simple linear
1466 // search will not work; it must be a decision tree.  Hence this helper
1467 // function is recursive, to generate the required tree structured code.
1468 // It's the interpreter, so we are trading off code space for speed.
1469 // See below for example code.
1470 void InterpreterMacroAssembler::record_klass_in_profile_helper(
1471                                         Register receiver, Register mdp,
1472                                         Register reg2, int start_row,
1473                                         Label& done, bool is_virtual_call) {
1474   if (TypeProfileWidth == 0) {
1475     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1476   } else {
1477     record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
1478                                   &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset);
1479   }
1480 }
1481 
1482 void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp, Register reg2, int start_row,
1483                                                               Label& done, int total_rows,
1484                                                               OffsetFunction item_offset_fn,
1485                                                               OffsetFunction item_count_offset_fn) {
1486   int last_row = total_rows - 1;
1487   assert(start_row <= last_row, "must be work left to do");
1488   // Test this row for both the item and for null.
1489   // Take any of three different outcomes:
1490   //   1. found item => increment count and goto done
1491   //   2. found null => keep looking for case 1, maybe allocate this cell
1492   //   3. found something else => keep looking for cases 1 and 2
1493   // Case 3 is handled by a recursive call.

1557 //     // inner copy of decision tree, rooted at row[1]
1558 //     if (row[1].rec == rec) { row[1].incr(); goto done; }
1559 //     if (row[1].rec != nullptr) {
1560 //       // degenerate decision tree, rooted at row[2]
1561 //       if (row[2].rec == rec) { row[2].incr(); goto done; }
1562 //       if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow
1563 //       row[2].init(rec); goto done;
1564 //     } else {
1565 //       // remember row[1] is empty
1566 //       if (row[2].rec == rec) { row[2].incr(); goto done; }
1567 //       row[1].init(rec); goto done;
1568 //     }
1569 //   } else {
1570 //     // remember row[0] is empty
1571 //     if (row[1].rec == rec) { row[1].incr(); goto done; }
1572 //     if (row[2].rec == rec) { row[2].incr(); goto done; }
1573 //     row[0].init(rec); goto done;
1574 //   }
1575 //   done:
1576 
1577 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
1578                                                         Register mdp, Register reg2,
1579                                                         bool is_virtual_call) {
1580   assert(ProfileInterpreter, "must be profiling");
1581   Label done;
1582 
1583   record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);
1584 
1585   bind (done);
1586 }
1587 
1588 void InterpreterMacroAssembler::profile_ret(Register return_bci,
1589                                             Register mdp) {
1590   if (ProfileInterpreter) {
1591     Label profile_continue;
1592     uint row;
1593 
1594     // If no method data exists, go to profile_continue.
1595     test_method_data_pointer(mdp, profile_continue);
1596 
1597     // Update the total ret count.
1598     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1599 
1600     for (row = 0; row < RetData::row_limit(); row++) {
1601       Label next_test;
1602 
1603       // See if return_bci is equal to bci[n]:

1640     update_mdp_by_constant(mdp, mdp_delta);
1641 
1642     bind(profile_continue);
1643   }
1644 }
1645 
1646 
1647 void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
1648   if (ProfileInterpreter) {
1649     Label profile_continue;
1650 
1651     // If no method data exists, go to profile_continue.
1652     test_method_data_pointer(mdp, profile_continue);
1653 
1654     // The method data pointer needs to be updated.
1655     int mdp_delta = in_bytes(BitData::bit_data_size());
1656     if (TypeProfileCasts) {
1657       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1658 
1659       // Record the object type.
1660       record_klass_in_profile(klass, mdp, reg2, false);
1661     }
1662     update_mdp_by_constant(mdp, mdp_delta);
1663 
1664     bind(profile_continue);
1665   }
1666 }
1667 
1668 
1669 void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
1670   if (ProfileInterpreter) {
1671     Label profile_continue;
1672 
1673     // If no method data exists, go to profile_continue.
1674     test_method_data_pointer(mdp, profile_continue);
1675 
1676     // Update the default case count
1677     increment_mdp_data_at(mdp,
1678                           in_bytes(MultiBranchData::default_count_offset()));
1679 
1680     // The method data pointer needs to be updated.

1700     // case_array_offset_in_bytes()
1701     movl(reg2, in_bytes(MultiBranchData::per_case_size()));
1702     imulptr(index, reg2); // XXX l ?
1703     addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?
1704 
1705     // Update the case count
1706     increment_mdp_data_at(mdp,
1707                           index,
1708                           in_bytes(MultiBranchData::relative_count_offset()));
1709 
1710     // The method data pointer needs to be updated.
1711     update_mdp_by_offset(mdp,
1712                          index,
1713                          in_bytes(MultiBranchData::
1714                                   relative_displacement_offset()));
1715 
1716     bind(profile_continue);
1717   }
1718 }
1719 
1720 
1721 
1722 void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
1723   if (state == atos) {
1724     MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
1725   }
1726 }
1727 
1728 
1729 // Jump if ((*counter_addr += increment) & mask) == 0
1730 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, Address mask,
1731                                                         Register scratch, Label* where) {
1732   // This update is actually not atomic and can lose a number of updates
1733   // under heavy contention, but the alternative of using the (contended)
1734   // atomic update here penalizes profiling paths too much.
1735   movl(scratch, counter_addr);
1736   incrementl(scratch, InvocationCounter::count_increment);
1737   movl(counter_addr, scratch);
1738   andl(scratch, mask);
1739   if (where != nullptr) {

src/hotspot/cpu/x86/interp_masm_x86.cpp (new version)

  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "compiler/compiler_globals.hpp"
  26 #include "interp_masm_x86.hpp"
  27 #include "interpreter/interpreter.hpp"
  28 #include "interpreter/interpreterRuntime.hpp"
  29 #include "logging/log.hpp"
  30 #include "oops/arrayOop.hpp"
  31 #include "oops/constMethodFlags.hpp"
  32 #include "oops/markWord.hpp"
  33 #include "oops/methodData.hpp"
  34 #include "oops/method.hpp"
  35 #include "oops/inlineKlass.hpp"
  36 #include "oops/resolvedFieldEntry.hpp"
  37 #include "oops/resolvedIndyEntry.hpp"
  38 #include "oops/resolvedMethodEntry.hpp"
  39 #include "prims/jvmtiExport.hpp"
  40 #include "prims/jvmtiThreadState.hpp"
  41 #include "runtime/basicLock.hpp"
  42 #include "runtime/frame.inline.hpp"
  43 #include "runtime/javaThread.hpp"
  44 #include "runtime/safepointMechanism.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 #include "utilities/powerOfTwo.hpp"
  47 
  48 // Implementation of InterpreterMacroAssembler
  49 
  50 void InterpreterMacroAssembler::jump_to_entry(address entry) {
  51   assert(entry, "Entry must have been generated by now");
  52   jump(RuntimeAddress(entry));
  53 }
  54 
  55 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {

 150         Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
 151         profile_obj_type(tmp, mdo_arg_addr);
 152 
 153         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
 154         addptr(mdp, to_add);
 155         off_to_args += to_add;
 156       }
 157 
 158       if (MethodData::profile_return()) {
 159         movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
 160         subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
 161       }
 162 
 163       bind(done);
 164 
 165       if (MethodData::profile_return()) {
 166         // We're right after the type profile for the last
 167         // argument. tmp is the number of cells left in the
 168         // CallTypeData/VirtualCallTypeData to reach its end. Non null
 169         // if there's a return to profile.
 170         assert(SingleTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
 171         shll(tmp, log2i_exact((int)DataLayout::cell_size));
 172         addptr(mdp, tmp);
 173       }
 174       movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp);
 175     } else {
 176       assert(MethodData::profile_return(), "either profile call args or call ret");
 177       update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
 178     }
 179 
 180     // mdp points right after the end of the
 181     // CallTypeData/VirtualCallTypeData, right after the cells for the
 182     // return value type if there's one
 183 
 184     bind(profile_continue);
 185   }
 186 }
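
As a reading aid, the mdp adjustment performed above when return profiling is on can be modelled in plain C++. This is only an illustrative sketch of the arithmetic the generated assembly performs (the enclosing profiling routine is elided in this excerpt, and the names below are not HotSpot identifiers):

    #include <cstdint>

    // remaining_cells corresponds to 'tmp' above: the cell count of the
    // CallTypeData/VirtualCallTypeData minus the cells reserved for argument
    // profiling (TypeProfileArgsLimit * per_arg_count). It is non-zero exactly
    // when a return value is profiled.
    static intptr_t advance_mdp_past_return_cells(intptr_t mdp,
                                                  int data_cell_count,
                                                  int argument_cells,
                                                  int cell_size) {
      int remaining_cells = data_cell_count - argument_cells;  // subl above
      return mdp + (intptr_t)remaining_cells * cell_size;      // shll + addptr above
    }
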
 187 
 188 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
 189   assert_different_registers(mdp, ret, tmp, _bcp_register);
 190   if (ProfileInterpreter && MethodData::profile_return()) {

 195     if (MethodData::profile_return_jsr292_only()) {
 196       assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");
 197 
 198       // If we don't profile all invoke bytecodes we must make sure
 199       // it's a bytecode we indeed profile. We can't go back to the
 200       // beginning of the ProfileData we intend to update to check its
 201       // type because we're right after it and we don't know its
 202       // length
 203       Label do_profile;
 204       cmpb(Address(_bcp_register, 0), Bytecodes::_invokedynamic);
 205       jcc(Assembler::equal, do_profile);
 206       cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle);
 207       jcc(Assembler::equal, do_profile);
 208       get_method(tmp);
 209       cmpw(Address(tmp, Method::intrinsic_id_offset()), static_cast<int>(vmIntrinsics::_compiledLambdaForm));
 210       jcc(Assembler::notEqual, profile_continue);
 211 
 212       bind(do_profile);
 213     }
 214 
 215     Address mdo_ret_addr(mdp, -in_bytes(SingleTypeEntry::size()));
 216     mov(tmp, ret);
 217     profile_obj_type(tmp, mdo_ret_addr);
 218 
 219     bind(profile_continue);
 220   }
 221 }
 222 
 223 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
 224   if (ProfileInterpreter && MethodData::profile_parameters()) {
 225     Label profile_continue;
 226 
 227     test_method_data_pointer(mdp, profile_continue);
 228 
 229     // Load the offset of the area within the MDO used for
 230     // parameters. If it's negative we're not profiling any parameters
 231     movl(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
 232     testl(tmp1, tmp1);
 233     jcc(Assembler::negative, profile_continue);
 234 
 235     // Compute a pointer to the area for parameters from the offset

 501                                                              Register cpool,
 502                                                              Register index) {
 503   assert_different_registers(cpool, index);
 504 
 505   movw(index, Address(cpool, index, Address::times_ptr, sizeof(ConstantPool)));
 506   Register resolved_klasses = cpool;
 507   movptr(resolved_klasses, Address(cpool, ConstantPool::resolved_klasses_offset()));
 508   movptr(klass, Address(resolved_klasses, index, Address::times_ptr, Array<Klass*>::base_offset_in_bytes()));
 509 }
 510 
 511 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
 512 // subtype of super_klass.
 513 //
 514 // Args:
 515 //      rax: superklass
 516 //      Rsub_klass: subklass
 517 //
 518 // Kills:
 519 //      rcx, rdi
 520 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
 521                                                   Label& ok_is_subtype,
 522                                                   bool profile) {
 523   assert(Rsub_klass != rax, "rax holds superklass");
 524   LP64_ONLY(assert(Rsub_klass != r14, "r14 holds locals");)
 525   LP64_ONLY(assert(Rsub_klass != r13, "r13 holds bcp");)
 526   assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
 527   assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");
 528 
 529   // Profile the not-null value's klass.
 530   if (profile) {
 531     profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi
 532   }
 533 
 534   // Do the check.
 535   check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx
 536 }
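
A hedged sketch of how a caller typically uses this helper, following the register contract documented above (the caller-side template code is not part of this excerpt, so the surrounding details are illustrative):

    Label ok_is_subtype;
    // rax already holds the super klass; rbx holds the klass to test
    gen_subtype_check(rbx, ok_is_subtype, /*profile*/ true);
    // fall-through means the check failed (e.g. throw ClassCastException,
    // or push 0 for instanceof)
    bind(ok_is_subtype);
    // subtype confirmed
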
 537 
 538 
 539 // Java Expression Stack
 540 
 541 void InterpreterMacroAssembler::pop_ptr(Register r) {
 542   pop(r);
 543 }
 544 
 545 void InterpreterMacroAssembler::push_ptr(Register r) {
 546   push(r);
 547 }
 548 
 549 void InterpreterMacroAssembler::push_i(Register r) {
 550   push(r);
 551 }
 552 

 798 //       no error processing
 799 void InterpreterMacroAssembler::remove_activation(TosState state,
 800                                                   Register ret_addr,
 801                                                   bool throw_monitor_exception,
 802                                                   bool install_monitor_exception,
 803                                                   bool notify_jvmdi) {
 804   // Note: Registers rdx xmm0 may be in use for the
 805   // result check if synchronized method
 806   Label unlocked, unlock, no_unlock;
 807 
 808   const Register rthread = r15_thread;
 809   const Register robj    = c_rarg1;
 810   const Register rmon    = c_rarg1;
 811 
 812   // get the value of _do_not_unlock_if_synchronized into rdx
 813   const Address do_not_unlock_if_synchronized(rthread,
 814     in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
 815   movbool(rbx, do_not_unlock_if_synchronized);
 816   movbool(do_not_unlock_if_synchronized, false); // reset the flag
 817 
 818   // get method access flags
 819   movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
 820   load_unsigned_short(rcx, Address(rcx, Method::access_flags_offset()));
 821   testl(rcx, JVM_ACC_SYNCHRONIZED);
 822   jcc(Assembler::zero, unlocked);
 823 
 824   // Don't unlock anything if the _do_not_unlock_if_synchronized flag
 825   // is set.
 826   testbool(rbx);
 827   jcc(Assembler::notZero, no_unlock);
 828 
 829   // unlock monitor
 830   push(state); // save result
 831 
 832   // BasicObjectLock will be first in list, since this is a
 833   // synchronized method. However, need to check that the object has
 834   // not been unlocked by an explicit monitorexit bytecode.
 835   const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
 836                         wordSize - (int) sizeof(BasicObjectLock));
 837   // We use c_rarg1/rdx so that if we go slow path it will be the correct
 838   // register for unlock_object to pass to VM directly

 937   // the stack, will call InterpreterRuntime::at_unwind.
 938   Label slow_path;
 939   Label fast_path;
 940   safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
 941   jmp(fast_path);
 942   bind(slow_path);
 943   push(state);
 944   set_last_Java_frame(noreg, rbp, (address)pc(), rscratch1);
 945   super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), r15_thread);
 946   reset_last_Java_frame(true);
 947   pop(state);
 948   bind(fast_path);
 949 
 950   // JVMTI support. Make sure the safepoint poll test is issued prior.
 951   if (notify_jvmdi) {
 952     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
 953   } else {
 954     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
 955   }
 956 
 957   if (StackReservedPages > 0) {
 958     movptr(rbx,
 959                Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
 960     // testing if reserved zone needs to be re-enabled
 961     Register rthread = r15_thread;
 962     Label no_reserved_zone_enabling;
 963 
 964     // check if already enabled - if so no re-enabling needed
 965     assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
 966     cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_enabled);
 967     jcc(Assembler::equal, no_reserved_zone_enabling);
 968 
 969     cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
 970     jcc(Assembler::lessEqual, no_reserved_zone_enabling);
 971 
 972     JFR_ONLY(leave_jfr_critical_section();)
 973 
 974     call_VM_leaf(
 975       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
 976     call_VM(noreg, CAST_FROM_FN_PTR(address,
 977                    InterpreterRuntime::throw_delayed_StackOverflowError));
 978     should_not_reach_here();
 979 
 980     bind(no_reserved_zone_enabling);
 981   }
 982 
 983   // remove activation
 984   // get sender sp
 985   movptr(rbx,
 986          Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
 987 
 988   if (state == atos && InlineTypeReturnedAsFields) {
 989     Label skip;
 990     Label not_null;
 991     testptr(rax, rax);
 992     jcc(Assembler::notZero, not_null);
 993     // Returned value is null, zero all return registers because they may belong to oop fields
 994     xorq(j_rarg1, j_rarg1);
 995     xorq(j_rarg2, j_rarg2);
 996     xorq(j_rarg3, j_rarg3);
 997     xorq(j_rarg4, j_rarg4);
 998     xorq(j_rarg5, j_rarg5);
 999     jmp(skip);
1000     bind(not_null);
1001 
1002     // Check if we are returning a non-null inline type and load its fields into registers
1003     test_oop_is_not_inline_type(rax, rscratch1, skip, /* can_be_null= */ false);
1004 
1005 #ifndef _LP64
1006     super_call_VM_leaf(StubRoutines::load_inline_type_fields_in_regs());
1007 #else
1008     // Load fields from a buffered value with an inline class specific handler
1009     load_klass(rdi, rax, rscratch1);
1010     movptr(rdi, Address(rdi, InstanceKlass::adr_inlineklass_fixed_block_offset()));
1011     movptr(rdi, Address(rdi, InlineKlass::unpack_handler_offset()));
1012     // Unpack handler can be null if inline type is not scalarizable in returns
1013     testptr(rdi, rdi);
1014     jcc(Assembler::zero, skip);
1015     call(rdi);
1016 #endif
1017 #ifdef ASSERT
1018     // TODO 8284443 Enable
1019     if (StressCallingConvention && false) {
1020       Label skip_stress;
1021       movptr(rscratch1, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
1022       movl(rscratch1, Address(rscratch1, Method::flags_offset()));
1023       testl(rcx, MethodFlags::has_scalarized_return_flag());
1024       jcc(Assembler::zero, skip_stress);
1025       load_klass(rax, rax, rscratch1);
1026       orptr(rax, 1);
1027       bind(skip_stress);
1028     }
1029 #endif
1030     // call above kills the value in rbx. Reload it.
1031     movptr(rbx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
1032     bind(skip);
1033   }
1034 
1035   leave();                           // remove frame anchor
1036 
1037   JFR_ONLY(leave_jfr_critical_section();)
1038 
1039   pop(ret_addr);                     // get return address
1040   mov(rsp, rbx);                     // set sp to sender sp
1041   pop_cont_fastpath();
1042 
1043 }
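
The new InlineTypeReturnedAsFields block above distinguishes three cases for an atos return. A minimal plain C++ model of that decision (illustrative only, not HotSpot code; the real unpack handler address is read from the InlineKlass fixed block as shown above):

    enum class AtosReturnAction {
      ZeroOopArgRegisters,  // returned oop is null: clear j_rarg1..j_rarg5, they may be typed as oop fields
      CallUnpackHandler,    // non-null inline type with a handler: scalarize its fields into registers
      ReturnOopUnchanged    // regular oop, or inline type that is not scalarizable in returns
    };

    static AtosReturnAction classify_atos_return(const void* ret_oop,
                                                 bool is_inline_type,
                                                 const void* unpack_handler) {
      if (ret_oop == nullptr)                          return AtosReturnAction::ZeroOopArgRegisters;
      if (is_inline_type && unpack_handler != nullptr) return AtosReturnAction::CallUnpackHandler;
      return AtosReturnAction::ReturnOopUnchanged;
    }
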
1044 
1045 #if INCLUDE_JFR
1046 void InterpreterMacroAssembler::enter_jfr_critical_section() {
1047   const Address sampling_critical_section(r15_thread, in_bytes(SAMPLING_CRITICAL_SECTION_OFFSET_JFR));
1048   movbool(sampling_critical_section, true);
1049 }
1050 
1051 void InterpreterMacroAssembler::leave_jfr_critical_section() {
1052   const Address sampling_critical_section(r15_thread, in_bytes(SAMPLING_CRITICAL_SECTION_OFFSET_JFR));
1053   movbool(sampling_critical_section, false);
1054 }
1055 #endif // INCLUDE_JFR
1056 
1057 void InterpreterMacroAssembler::get_method_counters(Register method,
1058                                                     Register mcs, Label& skip) {
1059   Label has_counters;
1060   movptr(mcs, Address(method, Method::method_counters_offset()));
1061   testptr(mcs, mcs);
1062   jcc(Assembler::notZero, has_counters);
1063   call_VM(noreg, CAST_FROM_FN_PTR(address,
1064           InterpreterRuntime::build_method_counters), method);
1065   movptr(mcs, Address(method,Method::method_counters_offset()));
1066   testptr(mcs, mcs);
1067   jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
1068   bind(has_counters);
1069 }
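
get_method_counters implements a lazy-allocation pattern: use the cached MethodCounters if present, otherwise ask the runtime to build one and re-check, branching to 'skip' if the allocation failed. A plain C++ sketch of that pattern (illustrative types, not HotSpot code):

    struct MethodCounters;
    MethodCounters* build_method_counters();   // stands in for the InterpreterRuntime call above

    static MethodCounters* get_or_build_counters(MethodCounters*& cached) {
      if (cached == nullptr) {
        cached = build_method_counters();      // may return nullptr under OOM
      }
      return cached;                           // caller skips counter updates when still nullptr
    }
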
1070 
1071 void InterpreterMacroAssembler::allocate_instance(Register klass, Register new_obj,
1072                                                   Register t1, Register t2,
1073                                                   bool clear_fields, Label& alloc_failed) {
1074   MacroAssembler::allocate_instance(klass, new_obj, t1, t2, clear_fields, alloc_failed);
1075   if (DTraceAllocProbes) {
1076     // Trigger dtrace event for fastpath
1077     push(atos);
1078     call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), new_obj);
1079     pop(atos);
1080   }
1081 }
1082 
1083 void InterpreterMacroAssembler::read_flat_field(Register entry, Register tmp1, Register tmp2, Register obj) {
1084   Label alloc_failed, slow_path, done;
1085   const Register alloc_temp = LP64_ONLY(rscratch1) NOT_LP64(rsi);
1086   const Register dst_temp   = LP64_ONLY(rscratch2) NOT_LP64(rdi);
1087   assert_different_registers(obj, entry, tmp1, tmp2, dst_temp, r8, r9);
1088 
1089   // If the field is nullable, jump to slow path
1090   load_unsigned_byte(tmp1, Address(entry, in_bytes(ResolvedFieldEntry::flags_offset())));
1091   testl(tmp1, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift);
1092   jcc(Assembler::equal, slow_path);
1093 
1094   // Grab the inline field klass
1095   const Register field_klass = tmp1;
1096   load_unsigned_short(tmp2, Address(entry, in_bytes(ResolvedFieldEntry::field_index_offset())));
1097 
1098   movptr(tmp1, Address(entry, ResolvedFieldEntry::field_holder_offset()));
1099   get_inline_type_field_klass(tmp1, tmp2, field_klass);
1100 
1101   // allocate buffer
1102   push(obj);  // push object being read from
1103   allocate_instance(field_klass, obj, alloc_temp, dst_temp, false, alloc_failed);
1104 
1105   // Have an oop instance buffer, copy into it
1106   load_unsigned_short(r9, Address(entry, in_bytes(ResolvedFieldEntry::field_index_offset())));
1107   movptr(r8, Address(entry, in_bytes(ResolvedFieldEntry::field_holder_offset())));
1108   inline_layout_info(r8, r9, r8); // holder, index, info => InlineLayoutInfo into r8
1109 
1110   payload_addr(obj, dst_temp, field_klass);
1111   pop(alloc_temp);             // restore object being read from
1112   load_sized_value(tmp2, Address(entry, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
1113   lea(tmp2, Address(alloc_temp, tmp2));
1114   // call_VM_leaf, clobbers a few regs, save restore new obj
1115   push(obj);
1116   flat_field_copy(IS_DEST_UNINITIALIZED, tmp2, dst_temp, r8);
1117   pop(obj);
1118   jmp(done);
1119 
1120   bind(alloc_failed);
1121   pop(obj);
1122   bind(slow_path);
1123   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flat_field),
1124           obj, entry);
1125   get_vm_result_oop(obj);
1126   bind(done);
1127 }
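
read_flat_field above splits into a fast path, which buffers the flat field value in a freshly allocated instance and copies the payload with flat_field_copy, and a slow path through the runtime. A small plain C++ model of that decision (illustrative only; the slow path is the InterpreterRuntime::read_flat_field call shown above):

    // Inputs mirror what the generated code tests: the null-free bit from the
    // ResolvedFieldEntry flags, and whether the buffer allocation succeeded.
    static const char* read_flat_field_path(bool field_is_null_free, bool buffer_allocated) {
      if (!field_is_null_free) return "slow path: nullable flat field, call the runtime";
      if (!buffer_allocated)   return "slow path: buffer allocation failed, call the runtime";
      return "fast path: copy the payload into the new buffer and return it";
    }
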
1128 
1129 void InterpreterMacroAssembler::write_flat_field(Register entry, Register tmp1, Register tmp2,
1130                                                  Register obj, Register off, Register value) {
1131   assert_different_registers(entry, tmp1, tmp2, obj, off, value);
1132 
1133   Label slow_path, done;
1134 
1135   load_unsigned_byte(tmp2, Address(entry, in_bytes(ResolvedFieldEntry::flags_offset())));
1136   test_field_is_not_null_free_inline_type(tmp2, tmp1, slow_path);
1137 
1138   null_check(value); // FIXME JDK-8341120
1139 
1140   lea(obj, Address(obj, off, Address::times_1));
1141 
1142   load_klass(tmp2, value, tmp1);
1143   payload_addr(value, value, tmp2);
1144 
1145   Register idx = tmp1;
1146   load_unsigned_short(idx, Address(entry, in_bytes(ResolvedFieldEntry::field_index_offset())));
1147   movptr(tmp2, Address(entry, in_bytes(ResolvedFieldEntry::field_holder_offset())));
1148 
1149   Register layout_info = off;
1150   inline_layout_info(tmp2, idx, layout_info);
1151 
1152   flat_field_copy(IN_HEAP, value, obj, layout_info);
1153   jmp(done);
1154 
1155   bind(slow_path);
1156   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_flat_field), obj, value, entry);
1157 
1158   bind(done);
1159 }
1160 
1161 // Lock object
1162 //
1163 // Args:
1164 //      rdx, c_rarg1: BasicObjectLock to be used for locking
1165 //
1166 // Kills:
1167 //      rax, rbx
1168 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
1169   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
1170 
1171   if (LockingMode == LM_MONITOR) {
1172     call_VM_preemptable(noreg,
1173             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1174             lock_reg);
1175   } else {
1176     Label count_locking, done, slow_case;
1177 
1178     const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1179     const Register tmp_reg = rbx;

1185     const int mark_offset = lock_offset +
1186                             BasicLock::displaced_header_offset_in_bytes();
1187 
1188     // Load object pointer into obj_reg
1189     movptr(obj_reg, Address(lock_reg, obj_offset));
1190 
1191     if (LockingMode == LM_LIGHTWEIGHT) {
1192       lightweight_lock(lock_reg, obj_reg, swap_reg, tmp_reg, slow_case);
1193     } else if (LockingMode == LM_LEGACY) {
1194       if (DiagnoseSyncOnValueBasedClasses != 0) {
1195         load_klass(tmp_reg, obj_reg, rklass_decode_tmp);
1196         testb(Address(tmp_reg, Klass::misc_flags_offset()), KlassFlags::_misc_is_value_based_class);
1197         jcc(Assembler::notZero, slow_case);
1198       }
1199 
1200       // Load immediate 1 into swap_reg %rax
1201       movl(swap_reg, 1);
1202 
1203       // Load (object->mark() | 1) into swap_reg %rax
1204       orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1205       if (EnableValhalla) {
1206         // Mask inline_type bit such that we go to the slow path if object is an inline type
1207         andptr(swap_reg, ~((int) markWord::inline_type_bit_in_place));
1208       }
1209 
1210       // Save (object->mark() | 1) into BasicLock's displaced header
1211       movptr(Address(lock_reg, mark_offset), swap_reg);
1212 
1213       assert(lock_offset == 0,
1214              "displaced header must be first word in BasicObjectLock");
1215 
1216       lock();
1217       cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1218       jcc(Assembler::zero, count_locking);
1219 
1220       const int zero_bits = 7;
1221 
1222       // Fast check for recursive lock.
1223       //
1224       // Can apply the optimization only if this is a stack lock
1225       // allocated in this thread. For efficiency, we can focus on
1226       // recently allocated stack locks (instead of reading the stack
1227       // base and checking whether 'mark' points inside the current
1228       // thread stack):

1503 }
1504 
1505 
1506 void InterpreterMacroAssembler::profile_taken_branch(Register mdp) {
1507   if (ProfileInterpreter) {
1508     Label profile_continue;
1509 
1510     // If no method data exists, go to profile_continue.
1511     test_method_data_pointer(mdp, profile_continue);
1512 
1513     // We are taking a branch.  Increment the taken count.
1514     increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
1515 
1516     // The method data pointer needs to be updated to reflect the new target.
1517     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1518     bind(profile_continue);
1519   }
1520 }
1521 
1522 
1523 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp, bool acmp) {
1524   if (ProfileInterpreter) {
1525     Label profile_continue;
1526 
1527     // If no method data exists, go to profile_continue.
1528     test_method_data_pointer(mdp, profile_continue);
1529 
1530     // We are not taking a branch.  Increment the not taken count.
1531     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
1532 
1533     // The method data pointer needs to be updated to correspond to
1534     // the next bytecode
1535     update_mdp_by_constant(mdp, acmp ? in_bytes(ACmpData::acmp_data_size()): in_bytes(BranchData::branch_data_size()));
1536     bind(profile_continue);
1537   }
1538 }
1539 
1540 void InterpreterMacroAssembler::profile_call(Register mdp) {
1541   if (ProfileInterpreter) {
1542     Label profile_continue;
1543 
1544     // If no method data exists, go to profile_continue.
1545     test_method_data_pointer(mdp, profile_continue);
1546 
1547     // We are making a call.  Increment the count.
1548     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1549 
1550     // The method data pointer needs to be updated to reflect the new target.
1551     update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1552     bind(profile_continue);
1553   }
1554 }
1555 

1578                                                      Register reg2,
1579                                                      bool receiver_can_be_null) {
1580   if (ProfileInterpreter) {
1581     Label profile_continue;
1582 
1583     // If no method data exists, go to profile_continue.
1584     test_method_data_pointer(mdp, profile_continue);
1585 
1586     Label skip_receiver_profile;
1587     if (receiver_can_be_null) {
1588       Label not_null;
1589       testptr(receiver, receiver);
1590       jccb(Assembler::notZero, not_null);
1591       // We are making a call.  Increment the count for null receiver.
1592       increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1593       jmp(skip_receiver_profile);
1594       bind(not_null);
1595     }
1596 
1597     // Record the receiver type.
1598     record_klass_in_profile(receiver, mdp, reg2);
1599     bind(skip_receiver_profile);
1600 
1601     // The method data pointer needs to be updated to reflect the new target.
1602     update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
1603     bind(profile_continue);
1604   }
1605 }
1606 
1607 // This routine creates a state machine for updating the multi-row
1608 // type profile at a virtual call site (or other type-sensitive bytecode).
1609 // The machine visits each row (of receiver/count) until the receiver type
1610 // is found, or until it runs out of rows.  At the same time, it remembers
1611 // the location of the first empty row.  (An empty row records null for its
1612 // receiver, and can be allocated for a newly-observed receiver type.)
1613 // Because there are two degrees of freedom in the state, a simple linear
1614 // search will not work; it must be a decision tree.  Hence this helper
1615 // function is recursive, to generate the required tree structured code.
1616 // It's the interpreter, so we are trading off code space for speed.
1617 // See below for example code.
1618 void InterpreterMacroAssembler::record_klass_in_profile_helper(Register receiver, Register mdp,
1619                                                                Register reg2, int start_row,
1620                                                                Label& done) {
1621   if (TypeProfileWidth == 0) {
1622     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1623   } else {
1624     record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
1625                                   &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset);
1626   }
1627 }
1628 
1629 void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp, Register reg2, int start_row,
1630                                                               Label& done, int total_rows,
1631                                                               OffsetFunction item_offset_fn,
1632                                                               OffsetFunction item_count_offset_fn) {
1633   int last_row = total_rows - 1;
1634   assert(start_row <= last_row, "must be work left to do");
1635   // Test this row for both the item and for null.
1636   // Take any of three different outcomes:
1637   //   1. found item => increment count and goto done
1638   //   2. found null => keep looking for case 1, maybe allocate this cell
1639   //   3. found something else => keep looking for cases 1 and 2
1640   // Case 3 is handled by a recursive call.

1704 //     // inner copy of decision tree, rooted at row[1]
1705 //     if (row[1].rec == rec) { row[1].incr(); goto done; }
1706 //     if (row[1].rec != nullptr) {
1707 //       // degenerate decision tree, rooted at row[2]
1708 //       if (row[2].rec == rec) { row[2].incr(); goto done; }
1709 //       if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow
1710 //       row[2].init(rec); goto done;
1711 //     } else {
1712 //       // remember row[1] is empty
1713 //       if (row[2].rec == rec) { row[2].incr(); goto done; }
1714 //       row[1].init(rec); goto done;
1715 //     }
1716 //   } else {
1717 //     // remember row[0] is empty
1718 //     if (row[1].rec == rec) { row[1].incr(); goto done; }
1719 //     if (row[2].rec == rec) { row[2].incr(); goto done; }
1720 //     row[0].init(rec); goto done;
1721 //   }
1722 //   done:
1723 
1724 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver, Register mdp, Register reg2) {
1725   assert(ProfileInterpreter, "must be profiling");
1726   Label done;
1727 
1728   record_klass_in_profile_helper(receiver, mdp, reg2, 0, done);
1729 
1730   bind (done);
1731 }
1732 
1733 void InterpreterMacroAssembler::profile_ret(Register return_bci,
1734                                             Register mdp) {
1735   if (ProfileInterpreter) {
1736     Label profile_continue;
1737     uint row;
1738 
1739     // If no method data exists, go to profile_continue.
1740     test_method_data_pointer(mdp, profile_continue);
1741 
1742     // Update the total ret count.
1743     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1744 
1745     for (row = 0; row < RetData::row_limit(); row++) {
1746       Label next_test;
1747 
1748       // See if return_bci is equal to bci[n]:

1785     update_mdp_by_constant(mdp, mdp_delta);
1786 
1787     bind(profile_continue);
1788   }
1789 }
1790 
1791 
1792 void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
1793   if (ProfileInterpreter) {
1794     Label profile_continue;
1795 
1796     // If no method data exists, go to profile_continue.
1797     test_method_data_pointer(mdp, profile_continue);
1798 
1799     // The method data pointer needs to be updated.
1800     int mdp_delta = in_bytes(BitData::bit_data_size());
1801     if (TypeProfileCasts) {
1802       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1803 
1804       // Record the object type.
1805       record_klass_in_profile(klass, mdp, reg2);
1806     }
1807     update_mdp_by_constant(mdp, mdp_delta);
1808 
1809     bind(profile_continue);
1810   }
1811 }
1812 
1813 
1814 void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
1815   if (ProfileInterpreter) {
1816     Label profile_continue;
1817 
1818     // If no method data exists, go to profile_continue.
1819     test_method_data_pointer(mdp, profile_continue);
1820 
1821     // Update the default case count
1822     increment_mdp_data_at(mdp,
1823                           in_bytes(MultiBranchData::default_count_offset()));
1824 
1825     // The method data pointer needs to be updated.

1845     // case_array_offset_in_bytes()
1846     movl(reg2, in_bytes(MultiBranchData::per_case_size()));
1847     imulptr(index, reg2); // XXX l ?
1848     addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?
1849 
1850     // Update the case count
1851     increment_mdp_data_at(mdp,
1852                           index,
1853                           in_bytes(MultiBranchData::relative_count_offset()));
1854 
1855     // The method data pointer needs to be updated.
1856     update_mdp_by_offset(mdp,
1857                          index,
1858                          in_bytes(MultiBranchData::
1859                                   relative_displacement_offset()));
1860 
1861     bind(profile_continue);
1862   }
1863 }
1864 
1865 template <class ArrayData> void InterpreterMacroAssembler::profile_array_type(Register mdp,
1866                                                                               Register array,
1867                                                                               Register tmp) {
1868   if (ProfileInterpreter) {
1869     Label profile_continue;
1870 
1871     // If no method data exists, go to profile_continue.
1872     test_method_data_pointer(mdp, profile_continue);
1873 
1874     mov(tmp, array);
1875     profile_obj_type(tmp, Address(mdp, in_bytes(ArrayData::array_offset())));
1876 
1877     Label not_flat;
1878     test_non_flat_array_oop(array, tmp, not_flat);
1879 
1880     set_mdp_flag_at(mdp, ArrayData::flat_array_byte_constant());
1881 
1882     bind(not_flat);
1883 
1884     Label not_null_free;
1885     test_non_null_free_array_oop(array, tmp, not_null_free);
1886 
1887     set_mdp_flag_at(mdp, ArrayData::null_free_array_byte_constant());
1888 
1889     bind(not_null_free);
1890 
1891     bind(profile_continue);
1892   }
1893 }
1894 
1895 template void InterpreterMacroAssembler::profile_array_type<ArrayLoadData>(Register mdp,
1896                                                                            Register array,
1897                                                                            Register tmp);
1898 template void InterpreterMacroAssembler::profile_array_type<ArrayStoreData>(Register mdp,
1899                                                                             Register array,
1900                                                                             Register tmp);
1901 
1902 
1903 void InterpreterMacroAssembler::profile_multiple_element_types(Register mdp, Register element, Register tmp, const Register tmp2) {
1904   if (ProfileInterpreter) {
1905     Label profile_continue;
1906 
1907     // If no method data exists, go to profile_continue.
1908     test_method_data_pointer(mdp, profile_continue);
1909 
1910     Label done, update;
1911     testptr(element, element);
1912     jccb(Assembler::notZero, update);
1913     set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());
1914     jmp(done);
1915 
1916     bind(update);
1917     load_klass(tmp, element, rscratch1);
1918 
1919     // Record the object type.
1920     record_klass_in_profile(tmp, mdp, tmp2);
1921 
1922     bind(done);
1923 
1924     // The method data pointer needs to be updated.
1925     update_mdp_by_constant(mdp, in_bytes(ArrayStoreData::array_store_data_size()));
1926 
1927     bind(profile_continue);
1928   }
1929 }
1930 
1931 void InterpreterMacroAssembler::profile_element_type(Register mdp,
1932                                                      Register element,
1933                                                      Register tmp) {
1934   if (ProfileInterpreter) {
1935     Label profile_continue;
1936 
1937     // If no method data exists, go to profile_continue.
1938     test_method_data_pointer(mdp, profile_continue);
1939 
1940     mov(tmp, element);
1941     profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadData::element_offset())));
1942 
1943     // The method data pointer needs to be updated.
1944     update_mdp_by_constant(mdp, in_bytes(ArrayLoadData::array_load_data_size()));
1945 
1946     bind(profile_continue);
1947   }
1948 }
1949 
1950 void InterpreterMacroAssembler::profile_acmp(Register mdp,
1951                                              Register left,
1952                                              Register right,
1953                                              Register tmp) {
1954   if (ProfileInterpreter) {
1955     Label profile_continue;
1956 
1957     // If no method data exists, go to profile_continue.
1958     test_method_data_pointer(mdp, profile_continue);
1959 
1960     mov(tmp, left);
1961     profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::left_offset())));
1962 
1963     Label left_not_inline_type;
1964     test_oop_is_not_inline_type(left, tmp, left_not_inline_type);
1965     set_mdp_flag_at(mdp, ACmpData::left_inline_type_byte_constant());
1966     bind(left_not_inline_type);
1967 
1968     mov(tmp, right);
1969     profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::right_offset())));
1970 
1971     Label right_not_inline_type;
1972     test_oop_is_not_inline_type(right, tmp, right_not_inline_type);
1973     set_mdp_flag_at(mdp, ACmpData::right_inline_type_byte_constant());
1974     bind(right_not_inline_type);
1975 
1976     bind(profile_continue);
1977   }
1978 }
1979 
1980 
1981 void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
1982   if (state == atos) {
1983     MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
1984   }
1985 }
1986 
1987 
1988 // Jump if ((*counter_addr += increment) & mask) == 0
1989 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, Address mask,
1990                                                         Register scratch, Label* where) {
1991   // This update is actually not atomic and can lose a number of updates
1992   // under heavy contention, but the alternative of using the (contended)
1993   // atomic update here penalizes profiling paths too much.
1994   movl(scratch, counter_addr);
1995   incrementl(scratch, InvocationCounter::count_increment);
1996   movl(counter_addr, scratch);
1997   andl(scratch, mask);
1998   if (where != nullptr) {
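
The excerpt ends inside increment_mask_and_jump; its intent is the header comment above: jump if ((*counter_addr += increment) & mask) == 0. A plain C++ model of that test, including the deliberately non-atomic update noted in the comment (illustrative only):

    #include <cstdint>

    // Returns true when the caller should take the overflow branch ('where').
    static bool increment_mask_and_test(uint32_t* counter_addr, uint32_t increment, uint32_t mask) {
      uint32_t v = *counter_addr + increment;  // movl + incrementl
      *counter_addr = v;                       // plain store; lost updates under contention are tolerated
      return (v & mask) == 0;                  // andl + conditional jump
    }
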