src/hotspot/cpu/x86/interp_masm_x86.cpp

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compiler_globals.hpp"
  27 #include "interp_masm_x86.hpp"
  28 #include "interpreter/interpreter.hpp"
  29 #include "interpreter/interpreterRuntime.hpp"
  30 #include "logging/log.hpp"
  31 #include "oops/arrayOop.hpp"
  32 #include "oops/markWord.hpp"
  33 #include "oops/methodData.hpp"
  34 #include "oops/method.hpp"
  35 #include "prims/jvmtiExport.hpp"
  36 #include "prims/jvmtiThreadState.hpp"
  37 #include "runtime/basicLock.hpp"
  38 #include "runtime/frame.inline.hpp"
  39 #include "runtime/javaThread.hpp"
  40 #include "runtime/safepointMechanism.hpp"
  41 #include "runtime/sharedRuntime.hpp"
  42 #include "utilities/powerOfTwo.hpp"
  43 
  44 // Implementation of InterpreterMacroAssembler
  45 
  46 void InterpreterMacroAssembler::jump_to_entry(address entry) {
  47   assert(entry, "Entry must have been generated by now");
  48   jump(RuntimeAddress(entry));
  49 }
  50 
  51 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  52   Label update, next, none;
  53 
  54   interp_verify_oop(obj, atos);

 133         Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
 134         profile_obj_type(tmp, mdo_arg_addr);
 135 
 136         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
 137         addptr(mdp, to_add);
 138         off_to_args += to_add;
 139       }
 140 
 141       if (MethodData::profile_return()) {
 142         movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
 143         subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
 144       }
 145 
 146       bind(done);
 147 
 148       if (MethodData::profile_return()) {
 149         // We're right after the type profile for the last
 150         // argument. tmp is the number of cells left in the
 151         // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
 152         // if there's a return to profile.
 153         assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
 154         shll(tmp, log2i_exact((int)DataLayout::cell_size));
 155         addptr(mdp, tmp);
 156       }
 157       movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp);
 158     } else {
 159       assert(MethodData::profile_return(), "either profile call args or call ret");
 160       update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
 161     }
 162 
 163     // mdp points right after the end of the
 164     // CallTypeData/VirtualCallTypeData, right after the cells for the
 165     // return value type, if there's one.
 166 
 167     bind(profile_continue);
 168   }
 169 }
 170 
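The cell arithmetic above is terse: tmp holds a cell count, so the mdp is advanced by shifting left by log2 of the cell size and adding. A minimal stand-alone C++ sketch of the same pointer math, assuming (as log2i_exact requires) that the cell size is a power of two:

#include <cassert>
#include <cstdint>

// Advance the method-data pointer past 'cells_left' profile cells.
// Mirrors: shll(tmp, log2i_exact(cell_size)); addptr(mdp, tmp);
static uint8_t* advance_mdp(uint8_t* mdp, intptr_t cells_left, int cell_size) {
  assert((cell_size & (cell_size - 1)) == 0 && "cell size must be a power of two");
  int shift = 0;
  while ((1 << shift) < cell_size) ++shift;  // log2i_exact equivalent
  return mdp + (cells_left << shift);        // cells_left * cell_size bytes
}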
 171 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
 172   assert_different_registers(mdp, ret, tmp, _bcp_register);
 173   if (ProfileInterpreter && MethodData::profile_return()) {

 178     if (MethodData::profile_return_jsr292_only()) {
 179       assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");
 180 
 181       // If we don't profile all invoke bytecodes we must make sure
 182       // it's a bytecode we indeed profile. We can't go back to the
 183       // beginning of the ProfileData we intend to update to check its
 184       // type because we're right after it and we don't know its
 185       // length.
 186       Label do_profile;
 187       cmpb(Address(_bcp_register, 0), Bytecodes::_invokedynamic);
 188       jcc(Assembler::equal, do_profile);
 189       cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle);
 190       jcc(Assembler::equal, do_profile);
 191       get_method(tmp);
 192       cmpw(Address(tmp, Method::intrinsic_id_offset_in_bytes()), static_cast<int>(vmIntrinsics::_compiledLambdaForm));
 193       jcc(Assembler::notEqual, profile_continue);
 194 
 195       bind(do_profile);
 196     }
 197 
 198     Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
 199     mov(tmp, ret);
 200     profile_obj_type(tmp, mdo_ret_addr);
 201 
 202     bind(profile_continue);
 203   }
 204 }
 205 
 206 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
 207   if (ProfileInterpreter && MethodData::profile_parameters()) {
 208     Label profile_continue;
 209 
 210     test_method_data_pointer(mdp, profile_continue);
 211 
 212     // Load the offset of the area within the MDO used for
 213     // parameters. If it's negative, we're not profiling any parameters.
 214     movl(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
 215     testl(tmp1, tmp1);
 216     jcc(Assembler::negative, profile_continue);
 217 
 218     // Compute a pointer to the area for parameters from the offset

 538 
 539   const int method_offset = in_bytes(
 540     ConstantPoolCache::base_offset() +
 541       ((byte_no == TemplateTable::f2_byte)
 542        ? ConstantPoolCacheEntry::f2_offset()
 543        : ConstantPoolCacheEntry::f1_offset()));
 544 
 545   movptr(method, Address(cache, index, Address::times_ptr, method_offset)); // get f1/f2 Method*
 546 }
 547 
 548 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
 549 // subtype of super_klass.
 550 //
 551 // Args:
 552 //      rax: superklass
 553 //      Rsub_klass: subklass
 554 //
 555 // Kills:
 556 //      rcx, rdi
 557 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
 558                                                   Label& ok_is_subtype) {
 559   assert(Rsub_klass != rax, "rax holds superklass");
 560   LP64_ONLY(assert(Rsub_klass != r14, "r14 holds locals");)
 561   LP64_ONLY(assert(Rsub_klass != r13, "r13 holds bcp");)
 562   assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
 563   assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");
 564 
 565   // Profile the not-null value's klass.
 566   profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi
 567 
 568   // Do the check.
 569   check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx
 570 
 571   // Profile the failure of the check.
 572   profile_typecheck_failed(rcx); // blows rcx
 573 }
 574 
 575 
 576 #ifndef _LP64
 577 void InterpreterMacroAssembler::f2ieee() {
 578   if (IEEEPrecision) {
 579     fstp_s(Address(rsp, 0));
 580     fld_s(Address(rsp, 0));
 581   }
 582 }
 583 
 584 
 585 void InterpreterMacroAssembler::d2ieee() {
 586   if (IEEEPrecision) {
 587     fstp_d(Address(rsp, 0));
 588     fld_d(Address(rsp, 0));
 589   }
 590 }
 591 #endif // _LP64
 592 

 998   // the stack, will call InterpreterRuntime::at_unwind.
 999   Label slow_path;
1000   Label fast_path;
1001   safepoint_poll(slow_path, rthread, true /* at_return */, false /* in_nmethod */);
1002   jmp(fast_path);
1003   bind(slow_path);
1004   push(state);
1005   set_last_Java_frame(rthread, noreg, rbp, (address)pc(), rscratch1);
1006   super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), rthread);
1007   NOT_LP64(get_thread(rthread);) // call_VM clobbered it, restore
1008   reset_last_Java_frame(rthread, true);
1009   pop(state);
1010   bind(fast_path);
1011 
1012   // get the value of _do_not_unlock_if_synchronized into rdx
1013   const Address do_not_unlock_if_synchronized(rthread,
1014     in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
1015   movbool(rbx, do_not_unlock_if_synchronized);
1016   movbool(do_not_unlock_if_synchronized, false); // reset the flag
1017 
1018   // get method access flags
1019   movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
1020   movl(rcx, Address(rcx, Method::access_flags_offset()));
1021   testl(rcx, JVM_ACC_SYNCHRONIZED);
1022   jcc(Assembler::zero, unlocked);
1023 
1024   // Don't unlock anything if the _do_not_unlock_if_synchronized flag
1025   // is set.
1026   testbool(rbx);
1027   jcc(Assembler::notZero, no_unlock);
1028 
1029   // unlock monitor
1030   push(state); // save result
1031 
1032   // BasicObjectLock will be first in list, since this is a
1033   // synchronized method. However, need to check that the object has
1034   // not been unlocked by an explicit monitorexit bytecode.
1035   const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
1036                         wordSize - (int) sizeof(BasicObjectLock));
1037   // We use c_rarg1/rdx so that if we take the slow path it will be the
1038   // correct register for unlock_object to pass to the VM directly

1122     bind(loop);
1123     // check if current entry is used
1124     cmpptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD);
1125     jcc(Assembler::notEqual, exception);
1126 
1127     addptr(rmon, entry_size); // otherwise advance to next entry
1128     bind(entry);
1129     cmpptr(rmon, rbx); // check if bottom reached
1130     jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
1131   }
1132 
1133   bind(no_unlock);
1134 
1135   // jvmti support
1136   if (notify_jvmdi) {
1137     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
1138   } else {
1139     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
1140   }
1141 
1142   // remove activation
1143   // get sender sp
1144   movptr(rbx,
1145          Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
1146   if (StackReservedPages > 0) {
1147     // testing if reserved zone needs to be re-enabled
1148     Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
1149     Label no_reserved_zone_enabling;
1150 
1151     NOT_LP64(get_thread(rthread);)
1152 
1153     cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_enabled);
1154     jcc(Assembler::equal, no_reserved_zone_enabling);
1155 
1156     cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
1157     jcc(Assembler::lessEqual, no_reserved_zone_enabling);
1158 
1159     call_VM_leaf(
1160       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
1161     call_VM(noreg, CAST_FROM_FN_PTR(address,
1162                    InterpreterRuntime::throw_delayed_StackOverflowError));
1163     should_not_reach_here();
1164 
1165     bind(no_reserved_zone_enabling);
1166   }
1167   leave();                           // remove frame anchor
1168   pop(ret_addr);                     // get return address
1169   mov(rsp, rbx);                     // set sp to sender sp
1170   pop_cont_fastpath();
1171 }
1172 
1173 void InterpreterMacroAssembler::get_method_counters(Register method,
1174                                                     Register mcs, Label& skip) {
1175   Label has_counters;
1176   movptr(mcs, Address(method, Method::method_counters_offset()));
1177   testptr(mcs, mcs);
1178   jcc(Assembler::notZero, has_counters);
1179   call_VM(noreg, CAST_FROM_FN_PTR(address,
1180           InterpreterRuntime::build_method_counters), method);
1181   movptr(mcs, Address(method, Method::method_counters_offset()));
1182   testptr(mcs, mcs);
1183   jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
1184   bind(has_counters);
1185 }
1186 
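get_method_counters() above is the usual lazy-allocation pattern: load, call the runtime only when null, then reload and re-test because the allocation itself can fail. A hedged stand-alone C++ sketch; the types and the allocator are illustrative stubs, not the HotSpot classes:

#include <new>

struct MethodCounters { /* counters elided */ };
struct Method { MethodCounters* _method_counters = nullptr; };

// Stub standing in for InterpreterRuntime::build_method_counters; the real
// runtime call can fail and leave the field null (OutOfMemory).
static void build_method_counters(Method* m) {
  m->_method_counters = new (std::nothrow) MethodCounters();
}

static MethodCounters* get_method_counters(Method* m, bool& skip) {
  MethodCounters* mcs = m->_method_counters;  // movptr(mcs, ...)
  if (mcs == nullptr) {                       // testptr; jcc(notZero, has_counters)
    build_method_counters(m);                 // call_VM(...)
    mcs = m->_method_counters;                // reload after the call
    skip = (mcs == nullptr);                  // still null: OutOfMemory, skip
  }
  return mcs;
}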
1187 
1188 // Lock object
1189 //
1190 // Args:
1191 //      rdx, c_rarg1: BasicObjectLock to be used for locking
1192 //
1193 // Kills:
1194 //      rax, rbx
1195 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
1196   assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
1197          "The argument is only for looks. It must be c_rarg1");
1198 
1199   if (UseHeavyMonitors) {
1200     call_VM(noreg,
1201             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1202             lock_reg);
1203   } else {
1204     Label count_locking, done, slow_case;
1205 
1206     const Register swap_reg = rax; // Must use rax for cmpxchg instruction

1211     const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
1212     const int lock_offset = BasicObjectLock::lock_offset_in_bytes();
1213     const int mark_offset = lock_offset +
1214                             BasicLock::displaced_header_offset_in_bytes();
1215 
1216     // Load object pointer into obj_reg
1217     movptr(obj_reg, Address(lock_reg, obj_offset));
1218 
1219     if (DiagnoseSyncOnValueBasedClasses != 0) {
1220       load_klass(tmp_reg, obj_reg, rklass_decode_tmp);
1221       movl(tmp_reg, Address(tmp_reg, Klass::access_flags_offset()));
1222       testl(tmp_reg, JVM_ACC_IS_VALUE_BASED_CLASS);
1223       jcc(Assembler::notZero, slow_case);
1224     }
1225 
1226     // Load immediate 1 into swap_reg %rax
1227     movl(swap_reg, 1);
1228 
1229     // Load (object->mark() | 1) into swap_reg %rax
1230     orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1231 
1232     // Save (object->mark() | 1) into BasicLock's displaced header
1233     movptr(Address(lock_reg, mark_offset), swap_reg);
1234 
1235     assert(lock_offset == 0,
1236            "displaced header must be first word in BasicObjectLock");
1237 
1238     lock();
1239     cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1240     jcc(Assembler::zero, count_locking);
1241 
1242     const int zero_bits = LP64_ONLY(7) NOT_LP64(3);
1243 
1244     // Fast check for recursive lock.
1245     //
1246     // Can apply the optimization only if this is a stack lock
1247     // allocated in this thread. For efficiency, we can focus on
1248     // recently allocated stack locks (instead of reading the stack
1249     // base and checking whether 'mark' points inside the current
1250     // thread stack):
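The enumeration this comment leads into is elided in this hunk. For orientation, a hedged C++ rendering of the whole fast path: set the unlocked bit, try to CAS the box address into the mark word, and on failure apply the recursive-stack-lock test described above. zero_bits, page_size and the atomic are stand-ins, not the HotSpot API:

#include <atomic>
#include <cstdint>

// 'box' is the address of the BasicLock on the caller's stack.
static bool try_stack_lock(std::atomic<uintptr_t>& mark_word, uintptr_t box,
                           uintptr_t sp, uintptr_t zero_bits, uintptr_t page_size) {
  uintptr_t mark = mark_word.load() | 1;             // orptr: unlocked mark shape
  if (mark_word.compare_exchange_strong(mark, box)) {
    return true;                                     // locked (count_locking path)
  }
  // On failure 'mark' now holds the current mark word, like rax after cmpxchg.
  // Recursive iff it is an aligned stack address within a page above sp:
  return ((mark - sp) & (zero_bits - page_size)) == 0;
}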

1557     test_method_data_pointer(mdp, profile_continue);
1558 
1559     // We are taking a branch.  Increment the taken count.
1560     // We inline increment_mdp_data_at to return bumped_count in a register
1561     //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
1562     Address data(mdp, in_bytes(JumpData::taken_offset()));
1563     movptr(bumped_count, data);
1564     assert(DataLayout::counter_increment == 1,
1565             "flow-free idiom only works with 1");
1566     addptr(bumped_count, DataLayout::counter_increment);
1567     sbbptr(bumped_count, 0);
1568     movptr(data, bumped_count); // Store back out
1569 
1570     // The method data pointer needs to be updated to reflect the new target.
1571     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1572     bind(profile_continue);
1573   }
1574 }
1575 
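The add/sbb pair above is a branch-free saturating increment: if the add wraps to zero the carry flag is set, and subtracting the carry pulls the counter back to all-ones. The same idiom as a one-function C++ sketch, assuming counter_increment == 1 exactly as the assert requires:

#include <cstdint>

static uintptr_t bump_saturating(uintptr_t count) {
  uintptr_t bumped = count + 1;  // addptr: wraps to 0 on overflow, carry set
  bumped -= (bumped == 0);       // sbbptr(bumped_count, 0): subtract the carry
  return bumped;                 // saturates at UINTPTR_MAX, no branch
}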
1576 
1577 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
1578   if (ProfileInterpreter) {
1579     Label profile_continue;
1580 
1581     // If no method data exists, go to profile_continue.
1582     test_method_data_pointer(mdp, profile_continue);
1583 
1584     // We are taking a branch.  Increment the not taken count.
1585     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
1586 
1587     // The method data pointer needs to be updated to correspond to
1588     // the next bytecode
1589     update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
1590     bind(profile_continue);
1591   }
1592 }
1593 
1594 void InterpreterMacroAssembler::profile_call(Register mdp) {
1595   if (ProfileInterpreter) {
1596     Label profile_continue;
1597 
1598     // If no method data exists, go to profile_continue.
1599     test_method_data_pointer(mdp, profile_continue);
1600 
1601     // We are making a call.  Increment the count.
1602     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1603 
1604     // The method data pointer needs to be updated to reflect the new target.
1605     update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1606     bind(profile_continue);
1607   }
1608 }
1609 

1944     // case_array_offset_in_bytes()
1945     movl(reg2, in_bytes(MultiBranchData::per_case_size()));
1946     imulptr(index, reg2); // XXX l ?
1947     addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?
1948 
1949     // Update the case count
1950     increment_mdp_data_at(mdp,
1951                           index,
1952                           in_bytes(MultiBranchData::relative_count_offset()));
1953 
1954     // The method data pointer needs to be updated.
1955     update_mdp_by_offset(mdp,
1956                          index,
1957                          in_bytes(MultiBranchData::
1958                                   relative_displacement_offset()));
1959 
1960     bind(profile_continue);
1961   }
1962 }
1963 
1964 
1965 
1966 void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
1967   if (state == atos) {
1968     MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
1969   }
1970 }
1971 
1972 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
1973 #ifndef _LP64
1974   if ((state == ftos && UseSSE < 1) ||
1975       (state == dtos && UseSSE < 2)) {
1976     MacroAssembler::verify_FPU(stack_depth);
1977   }
1978 #endif
1979 }
1980 
1981 // Jump if ((*counter_addr += increment) & mask) == 0
1982 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, Address mask,
1983                                                         Register scratch, Label* where) {

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compiler_globals.hpp"
  27 #include "interp_masm_x86.hpp"
  28 #include "interpreter/interpreter.hpp"
  29 #include "interpreter/interpreterRuntime.hpp"
  30 #include "logging/log.hpp"
  31 #include "oops/arrayOop.hpp"
  32 #include "oops/markWord.hpp"
  33 #include "oops/methodData.hpp"
  34 #include "oops/method.hpp"
  35 #include "oops/inlineKlass.hpp"
  36 #include "prims/jvmtiExport.hpp"
  37 #include "prims/jvmtiThreadState.hpp"
  38 #include "runtime/basicLock.hpp"
  39 #include "runtime/frame.inline.hpp"
  40 #include "runtime/javaThread.hpp"
  41 #include "runtime/safepointMechanism.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "utilities/powerOfTwo.hpp"
  44 
  45 // Implementation of InterpreterMacroAssembler
  46 
  47 void InterpreterMacroAssembler::jump_to_entry(address entry) {
  48   assert(entry, "Entry must have been generated by now");
  49   jump(RuntimeAddress(entry));
  50 }
  51 
  52 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  53   Label update, next, none;
  54 
  55   interp_verify_oop(obj, atos);

 134         Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
 135         profile_obj_type(tmp, mdo_arg_addr);
 136 
 137         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
 138         addptr(mdp, to_add);
 139         off_to_args += to_add;
 140       }
 141 
 142       if (MethodData::profile_return()) {
 143         movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
 144         subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
 145       }
 146 
 147       bind(done);
 148 
 149       if (MethodData::profile_return()) {
 150         // We're right after the type profile for the last
 151         // argument. tmp is the number of cells left in the
 152         // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
 153         // if there's a return to profile.
 154         assert(SingleTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
 155         shll(tmp, log2i_exact((int)DataLayout::cell_size));
 156         addptr(mdp, tmp);
 157       }
 158       movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp);
 159     } else {
 160       assert(MethodData::profile_return(), "either profile call args or call ret");
 161       update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
 162     }
 163 
 164     // mdp points right after the end of the
 165     // CallTypeData/VirtualCallTypeData, right after the cells for the
 166     // return value type, if there's one.
 167 
 168     bind(profile_continue);
 169   }
 170 }
 171 
 172 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
 173   assert_different_registers(mdp, ret, tmp, _bcp_register);
 174   if (ProfileInterpreter && MethodData::profile_return()) {

 179     if (MethodData::profile_return_jsr292_only()) {
 180       assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");
 181 
 182       // If we don't profile all invoke bytecodes we must make sure
 183       // it's a bytecode we indeed profile. We can't go back to the
 184       // beginning of the ProfileData we intend to update to check its
 185       // type because we're right after it and we don't know its
 186       // length.
 187       Label do_profile;
 188       cmpb(Address(_bcp_register, 0), Bytecodes::_invokedynamic);
 189       jcc(Assembler::equal, do_profile);
 190       cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle);
 191       jcc(Assembler::equal, do_profile);
 192       get_method(tmp);
 193       cmpw(Address(tmp, Method::intrinsic_id_offset_in_bytes()), static_cast<int>(vmIntrinsics::_compiledLambdaForm));
 194       jcc(Assembler::notEqual, profile_continue);
 195 
 196       bind(do_profile);
 197     }
 198 
 199     Address mdo_ret_addr(mdp, -in_bytes(SingleTypeEntry::size()));
 200     mov(tmp, ret);
 201     profile_obj_type(tmp, mdo_ret_addr);
 202 
 203     bind(profile_continue);
 204   }
 205 }
 206 
 207 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
 208   if (ProfileInterpreter && MethodData::profile_parameters()) {
 209     Label profile_continue;
 210 
 211     test_method_data_pointer(mdp, profile_continue);
 212 
 213     // Load the offset of the area within the MDO used for
 214     // parameters. If it's negative, we're not profiling any parameters.
 215     movl(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
 216     testl(tmp1, tmp1);
 217     jcc(Assembler::negative, profile_continue);
 218 
 219     // Compute a pointer to the area for parameters from the offset

 539 
 540   const int method_offset = in_bytes(
 541     ConstantPoolCache::base_offset() +
 542       ((byte_no == TemplateTable::f2_byte)
 543        ? ConstantPoolCacheEntry::f2_offset()
 544        : ConstantPoolCacheEntry::f1_offset()));
 545 
 546   movptr(method, Address(cache, index, Address::times_ptr, method_offset)); // get f1/f2 Method*
 547 }
 548 
 549 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
 550 // subtype of super_klass.
 551 //
 552 // Args:
 553 //      rax: superklass
 554 //      Rsub_klass: subklass
 555 //
 556 // Kills:
 557 //      rcx, rdi
 558 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
 559                                                   Label& ok_is_subtype,
 560                                                   bool profile) {
 561   assert(Rsub_klass != rax, "rax holds superklass");
 562   LP64_ONLY(assert(Rsub_klass != r14, "r14 holds locals");)
 563   LP64_ONLY(assert(Rsub_klass != r13, "r13 holds bcp");)
 564   assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
 565   assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");
 566 
 567   // Profile the not-null value's klass.
 568   if (profile) {
 569     profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi
 570   }
 571 
 572   // Do the check.
 573   check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx
 574 
 575   // Profile the failure of the check.
 576   if (profile) {
 577     profile_typecheck_failed(rcx); // blows rcx
 578   }
 579 }
 580 
 581 
 582 #ifndef _LP64
 583 void InterpreterMacroAssembler::f2ieee() {
 584   if (IEEEPrecision) {
 585     fstp_s(Address(rsp, 0));
 586     fld_s(Address(rsp, 0));
 587   }
 588 }
 589 
 590 
 591 void InterpreterMacroAssembler::d2ieee() {
 592   if (IEEEPrecision) {
 593     fstp_d(Address(rsp, 0));
 594     fld_d(Address(rsp, 0));
 595   }
 596 }
 597 #endif // _LP64
 598 

1004   // the stack, will call InterpreterRuntime::at_unwind.
1005   Label slow_path;
1006   Label fast_path;
1007   safepoint_poll(slow_path, rthread, true /* at_return */, false /* in_nmethod */);
1008   jmp(fast_path);
1009   bind(slow_path);
1010   push(state);
1011   set_last_Java_frame(rthread, noreg, rbp, (address)pc(), rscratch1);
1012   super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), rthread);
1013   NOT_LP64(get_thread(rthread);) // call_VM clobbered it, restore
1014   reset_last_Java_frame(rthread, true);
1015   pop(state);
1016   bind(fast_path);
1017 
1018   // get the value of _do_not_unlock_if_synchronized into rdx
1019   const Address do_not_unlock_if_synchronized(rthread,
1020     in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
1021   movbool(rbx, do_not_unlock_if_synchronized);
1022   movbool(do_not_unlock_if_synchronized, false); // reset the flag
1023 
1024   // get method access flags
1025   movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
1026   movl(rcx, Address(rcx, Method::access_flags_offset()));
1027   testl(rcx, JVM_ACC_SYNCHRONIZED);
1028   jcc(Assembler::zero, unlocked);
1029 
1030   // Don't unlock anything if the _do_not_unlock_if_synchronized flag
1031   // is set.
1032   testbool(rbx);
1033   jcc(Assembler::notZero, no_unlock);
1034 
1035   // unlock monitor
1036   push(state); // save result
1037 
1038   // BasicObjectLock will be first in list, since this is a
1039   // synchronized method. However, need to check that the object has
1040   // not been unlocked by an explicit monitorexit bytecode.
1041   const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
1042                         wordSize - (int) sizeof(BasicObjectLock));
1043   // We use c_rarg1/rdx so that if we take the slow path it will be the
1044   // correct register for unlock_object to pass to the VM directly

1128     bind(loop);
1129     // check if current entry is used
1130     cmpptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD);
1131     jcc(Assembler::notEqual, exception);
1132 
1133     addptr(rmon, entry_size); // otherwise advance to next entry
1134     bind(entry);
1135     cmpptr(rmon, rbx); // check if bottom reached
1136     jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
1137   }
1138 
1139   bind(no_unlock);
1140 
1141   // jvmti support
1142   if (notify_jvmdi) {
1143     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
1144   } else {
1145     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
1146   }
1147 
1148   if (StackReservedPages > 0) {
1149     movptr(rbx,
1150                Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
1151     // testing if reserved zone needs to be re-enabled
1152     Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
1153     Label no_reserved_zone_enabling;
1154 
1155     NOT_LP64(get_thread(rthread);)
1156 
1157     cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_enabled);
1158     jcc(Assembler::equal, no_reserved_zone_enabling);
1159 
1160     cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
1161     jcc(Assembler::lessEqual, no_reserved_zone_enabling);
1162 
1163     call_VM_leaf(
1164       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
1165     call_VM(noreg, CAST_FROM_FN_PTR(address,
1166                    InterpreterRuntime::throw_delayed_StackOverflowError));
1167     should_not_reach_here();
1168 
1169     bind(no_reserved_zone_enabling);
1170   }
1171 
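A plain-C++ reading of the reserved-zone test above, as a sketch with illustrative names rather than the JavaThread API: the zone is re-armed (and the delayed StackOverflowError thrown) only when the guard is not already enabled and the caller's sp lies above the recorded activation watermark.

#include <cstdint>

static bool needs_reserved_zone_reenabling(uintptr_t sender_sp,
                                           uintptr_t reserved_stack_activation,
                                           bool guard_enabled) {
  if (guard_enabled) {
    return false;  // jcc(equal, no_reserved_zone_enabling)
  }
  // jcc(lessEqual, ...) skips when sender_sp <= activation; stacks grow down,
  // so a greater sender_sp means we are unwinding past the activation point.
  return sender_sp > reserved_stack_activation;
}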
1172   // remove activation
1173   // get sender sp
1174   movptr(rbx,
1175          Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
1176 
1177   if (state == atos && InlineTypeReturnedAsFields) {
1178     // Check if we are returning a non-null inline type and load its fields into registers
1179     Label skip;
1180     test_oop_is_not_inline_type(rax, rscratch1, skip);
1181 
1182 #ifndef _LP64
1183     super_call_VM_leaf(StubRoutines::load_inline_type_fields_in_regs());
1184 #else
1185     // Load fields from a buffered value with an inline class specific handler
1186     load_klass(rdi, rax, rscratch1);
1187     movptr(rdi, Address(rdi, InstanceKlass::adr_inlineklass_fixed_block_offset()));
1188     movptr(rdi, Address(rdi, InlineKlass::unpack_handler_offset()));
1189     // Unpack handler can be null if inline type is not scalarizable in returns
1190     testptr(rdi, rdi);
1191     jcc(Assembler::zero, skip);
1192     call(rdi);
1193 #endif
1194 #ifdef ASSERT
1195     if (StressInlineTypeReturnedAsFields) {
1196       // TODO 8284443 Enable this for value class returns (L-type descriptor)
1197       Label skip_stress;
1198       movptr(rscratch1, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
1199       movptr(rscratch1, Address(rscratch1, Method::const_offset()));
1200       load_unsigned_byte(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
1201       cmpl(rscratch1, T_PRIMITIVE_OBJECT);
1202       jcc(Assembler::notEqual, skip_stress);
1203       load_klass(rax, rax, rscratch1);
1204       orptr(rax, 1);
1205       bind(skip_stress);
1206     }
1207 #endif
1208     // The call above kills the value in rbx. Reload it.
1209     movptr(rbx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
1210     bind(skip);
1211   }
1212   leave();                           // remove frame anchor
1213   pop(ret_addr);                     // get return address
1214   mov(rsp, rbx);                     // set sp to sender sp
1215   pop_cont_fastpath();
1216 }
1217 
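The LP64 branch of the inline-type return path above dispatches through a per-class unpack handler. A hedged C++ sketch of that dispatch; the struct layout and names are stand-ins for the Valhalla runtime types, not the real API:

// Real chain: oop -> klass -> InlineKlassFixedBlock -> unpack handler.
struct InlineKlassFixedBlock { void (*unpack_handler)(void* buffered); };
struct Klass { InlineKlassFixedBlock* adr_inlineklass_fixed_block; };

static void maybe_load_fields_in_regs(void* buffered_value, Klass* k) {
  void (*handler)(void*) = k->adr_inlineklass_fixed_block->unpack_handler;
  if (handler != nullptr) {   // testptr(rdi, rdi); jcc(zero, skip)
    handler(buffered_value);  // call(rdi): class-specific field unpacking
  }
  // A null handler means the inline type is not scalarizable in returns;
  // the buffered oop is returned as-is.
}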
1218 void InterpreterMacroAssembler::get_method_counters(Register method,
1219                                                     Register mcs, Label& skip) {
1220   Label has_counters;
1221   movptr(mcs, Address(method, Method::method_counters_offset()));
1222   testptr(mcs, mcs);
1223   jcc(Assembler::notZero, has_counters);
1224   call_VM(noreg, CAST_FROM_FN_PTR(address,
1225           InterpreterRuntime::build_method_counters), method);
1226   movptr(mcs, Address(method, Method::method_counters_offset()));
1227   testptr(mcs, mcs);
1228   jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
1229   bind(has_counters);
1230 }
1231 
1232 void InterpreterMacroAssembler::allocate_instance(Register klass, Register new_obj,
1233                                                   Register t1, Register t2,
1234                                                   bool clear_fields, Label& alloc_failed) {
1235   MacroAssembler::allocate_instance(klass, new_obj, t1, t2, clear_fields, alloc_failed);
1236   {
1237     SkipIfEqual skip_if(this, &DTraceAllocProbes, 0, rscratch1);
1238     // Trigger dtrace event for fastpath
1239     push(atos);
1240     call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), new_obj);
1241     pop(atos);
1242   }
1243 }
1244 
1245 
1246 void InterpreterMacroAssembler::read_inlined_field(Register holder_klass,
1247                                                      Register field_index, Register field_offset,
1248                                                      Register obj) {
1249   Label alloc_failed, empty_value, done;
1250   const Register src = field_offset;
1251   const Register alloc_temp = LP64_ONLY(rscratch1) NOT_LP64(rsi);
1252   const Register dst_temp   = LP64_ONLY(rscratch2) NOT_LP64(rdi);
1253   assert_different_registers(obj, holder_klass, field_index, field_offset, dst_temp);
1254 
1255   // Grab the inline field klass
1256   push(holder_klass);
1257   const Register field_klass = holder_klass;
1258   get_inline_type_field_klass(holder_klass, field_index, field_klass);
1259 
1260   // check for empty value klass
1261   test_klass_is_empty_inline_type(field_klass, dst_temp, empty_value);
1262 
1263   // allocate buffer
1264   push(obj); // save holder
1265   allocate_instance(field_klass, obj, alloc_temp, dst_temp, false, alloc_failed);
1266 
1267   // Have an oop instance buffer, copy into it
1268   data_for_oop(obj, dst_temp, field_klass);
1269   pop(alloc_temp);             // restore holder
1270   lea(src, Address(alloc_temp, field_offset));
1271   // call_VM_leaf clobbers a few regs; save/restore the new obj
1272   push(obj);
1273   access_value_copy(IS_DEST_UNINITIALIZED, src, dst_temp, field_klass);
1274   pop(obj);
1275   pop(holder_klass);
1276   jmp(done);
1277 
1278   bind(empty_value);
1279   get_empty_inline_type_oop(field_klass, dst_temp, obj);
1280   pop(holder_klass);
1281   jmp(done);
1282 
1283   bind(alloc_failed);
1284   pop(obj);
1285   pop(holder_klass);
1286   call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_inlined_field),
1287           obj, field_index, holder_klass);
1288 
1289   bind(done);
1290 }
1291 
1292 void InterpreterMacroAssembler::read_flattened_element(Register array, Register index,
1293                                                        Register t1, Register t2,
1294                                                        Register obj) {
1295   assert_different_registers(array, index, t1, t2);
1296   Label alloc_failed, empty_value, done;
1297   const Register array_klass = t2;
1298   const Register elem_klass = t1;
1299   const Register alloc_temp = LP64_ONLY(rscratch1) NOT_LP64(rsi);
1300   const Register dst_temp   = LP64_ONLY(rscratch2) NOT_LP64(rdi);
1301 
1302   // load in array->klass()->element_klass()
1303   Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
1304   load_klass(array_klass, array, tmp_load_klass);
1305   movptr(elem_klass, Address(array_klass, ArrayKlass::element_klass_offset()));
1306 
1307   // check for empty value klass
1308   test_klass_is_empty_inline_type(elem_klass, dst_temp, empty_value);
1309 
1310   // compute the source address into "array_klass" and free up some regs
1311   const Register src = array_klass;
1312   push(index); // preserve index reg in case alloc_failed
1313   data_for_value_array_index(array, array_klass, index, src);
1314 
1315   allocate_instance(elem_klass, obj, alloc_temp, dst_temp, false, alloc_failed);
1316   // Have an oop instance buffer, copy into it
1317   store_ptr(0, obj); // preserve obj (overwrite index, no longer needed)
1318   data_for_oop(obj, dst_temp, elem_klass);
1319   access_value_copy(IS_DEST_UNINITIALIZED, src, dst_temp, elem_klass);
1320   pop(obj);
1321   jmp(done);
1322 
1323   bind(empty_value);
1324   get_empty_inline_type_oop(elem_klass, dst_temp, obj);
1325   jmp(done);
1326 
1327   bind(alloc_failed);
1328   pop(index);
1329   if (array == c_rarg2) {
1330     mov(elem_klass, array);
1331     array = elem_klass;
1332   }
1333   call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_load), array, index);
1334 
1335   bind(done);
1336 }
1337 
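read_flattened_element() above has a three-way outcome. Summarized as a hedged C++ control-flow sketch; every helper is an illustrative stub for the corresponding HotSpot routine, not a real API:

#include <cstdlib>

static void* get_empty_inline_type_oop(void*) { static char c; return &c; }   // stub
static void* allocate_instance(void*)          { return std::malloc(16); }    // stub
static void  access_value_copy(void*, void*, void*) {}                        // stub
static void* value_array_load_slow(void*, int) { return nullptr; }            // stub

static void* read_flattened_element(void* array, int index,
                                    void* elem_klass, void* src,
                                    bool klass_is_empty) {
  if (klass_is_empty) {
    return get_empty_inline_type_oop(elem_klass);  // canonical empty instance
  }
  if (void* obj = allocate_instance(elem_klass)) {
    access_value_copy(src, obj, elem_klass);       // copy the flat payload
    return obj;
  }
  return value_array_load_slow(array, index);      // alloc failed: runtime call
}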
1338 
1339 // Lock object
1340 //
1341 // Args:
1342 //      rdx, c_rarg1: BasicObjectLock to be used for locking
1343 //
1344 // Kills:
1345 //      rax, rbx
1346 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
1347   assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
1348          "The argument is only for looks. It must be c_rarg1");
1349 
1350   if (UseHeavyMonitors) {
1351     call_VM(noreg,
1352             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1353             lock_reg);
1354   } else {
1355     Label count_locking, done, slow_case;
1356 
1357     const Register swap_reg = rax; // Must use rax for cmpxchg instruction

1362     const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
1363     const int lock_offset = BasicObjectLock::lock_offset_in_bytes();
1364     const int mark_offset = lock_offset +
1365                             BasicLock::displaced_header_offset_in_bytes();
1366 
1367     // Load object pointer into obj_reg
1368     movptr(obj_reg, Address(lock_reg, obj_offset));
1369 
1370     if (DiagnoseSyncOnValueBasedClasses != 0) {
1371       load_klass(tmp_reg, obj_reg, rklass_decode_tmp);
1372       movl(tmp_reg, Address(tmp_reg, Klass::access_flags_offset()));
1373       testl(tmp_reg, JVM_ACC_IS_VALUE_BASED_CLASS);
1374       jcc(Assembler::notZero, slow_case);
1375     }
1376 
1377     // Load immediate 1 into swap_reg %rax
1378     movl(swap_reg, 1);
1379 
1380     // Load (object->mark() | 1) into swap_reg %rax
1381     orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1382     if (EnableValhalla) {
1383       // Mask inline_type bit such that we go to the slow path if object is an inline type
1384       andptr(swap_reg, ~((int) markWord::inline_type_bit_in_place));
1385     }
1386 
1387     // Save (object->mark() | 1) into BasicLock's displaced header
1388     movptr(Address(lock_reg, mark_offset), swap_reg);
1389 
1390     assert(lock_offset == 0,
1391            "displaced header must be first word in BasicObjectLock");
1392 
1393     lock();
1394     cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1395     jcc(Assembler::zero, count_locking);
1396 
1397     const int zero_bits = LP64_ONLY(7) NOT_LP64(3);
1398 
1399     // Fast check for recursive lock.
1400     //
1401     // Can apply the optimization only if this is a stack lock
1402     // allocated in this thread. For efficiency, we can focus on
1403     // recently allocated stack locks (instead of reading the stack
1404     // base and checking whether 'mark' points inside the current
1405     // thread stack):

1712     test_method_data_pointer(mdp, profile_continue);
1713 
1714     // We are taking a branch.  Increment the taken count.
1715     // We inline increment_mdp_data_at to return bumped_count in a register
1716     //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
1717     Address data(mdp, in_bytes(JumpData::taken_offset()));
1718     movptr(bumped_count, data);
1719     assert(DataLayout::counter_increment == 1,
1720             "flow-free idiom only works with 1");
1721     addptr(bumped_count, DataLayout::counter_increment);
1722     sbbptr(bumped_count, 0);
1723     movptr(data, bumped_count); // Store back out
1724 
1725     // The method data pointer needs to be updated to reflect the new target.
1726     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1727     bind(profile_continue);
1728   }
1729 }
1730 
1731 
1732 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp, bool acmp) {
1733   if (ProfileInterpreter) {
1734     Label profile_continue;
1735 
1736     // If no method data exists, go to profile_continue.
1737     test_method_data_pointer(mdp, profile_continue);
1738 
1739     // We are taking a branch.  Increment the not taken count.
1740     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
1741 
1742     // The method data pointer needs to be updated to correspond to
1743     // the next bytecode
1744     update_mdp_by_constant(mdp, acmp ? in_bytes(ACmpData::acmp_data_size()) : in_bytes(BranchData::branch_data_size()));
1745     bind(profile_continue);
1746   }
1747 }
1748 
1749 void InterpreterMacroAssembler::profile_call(Register mdp) {
1750   if (ProfileInterpreter) {
1751     Label profile_continue;
1752 
1753     // If no method data exists, go to profile_continue.
1754     test_method_data_pointer(mdp, profile_continue);
1755 
1756     // We are making a call.  Increment the count.
1757     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1758 
1759     // The method data pointer needs to be updated to reflect the new target.
1760     update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1761     bind(profile_continue);
1762   }
1763 }
1764 

2099     // case_array_offset_in_bytes()
2100     movl(reg2, in_bytes(MultiBranchData::per_case_size()));
2101     imulptr(index, reg2); // XXX l ?
2102     addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?
2103 
2104     // Update the case count
2105     increment_mdp_data_at(mdp,
2106                           index,
2107                           in_bytes(MultiBranchData::relative_count_offset()));
2108 
2109     // The method data pointer needs to be updated.
2110     update_mdp_by_offset(mdp,
2111                          index,
2112                          in_bytes(MultiBranchData::
2113                                   relative_displacement_offset()));
2114 
2115     bind(profile_continue);
2116   }
2117 }
2118 
2119 void InterpreterMacroAssembler::profile_array(Register mdp,
2120                                               Register array,
2121                                               Register tmp) {
2122   if (ProfileInterpreter) {
2123     Label profile_continue;
2124 
2125     // If no method data exists, go to profile_continue.
2126     test_method_data_pointer(mdp, profile_continue);
2127 
2128     mov(tmp, array);
2129     profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadStoreData::array_offset())));
2130 
2131     Label not_flat;
2132     test_non_flattened_array_oop(array, tmp, not_flat);
2133 
2134     set_mdp_flag_at(mdp, ArrayLoadStoreData::flat_array_byte_constant());
2135 
2136     bind(not_flat);
2137 
2138     Label not_null_free;
2139     test_non_null_free_array_oop(array, tmp, not_null_free);
2140 
2141     set_mdp_flag_at(mdp, ArrayLoadStoreData::null_free_array_byte_constant());
2142 
2143     bind(not_null_free);
2144 
2145     bind(profile_continue);
2146   }
2147 }
2148 
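set_mdp_flag_at() ORs a flag byte into the profile entry's header, so the two tests above accumulate everything ever observed about the array across executions. A tiny C++ illustration; the bit values are hypothetical, not the real DataLayout encoding:

#include <cstdint>

enum : uint8_t {
  FLAT_ARRAY      = 1 << 0,  // hypothetical flat_array_byte_constant()
  NULL_FREE_ARRAY = 1 << 1   // hypothetical null_free_array_byte_constant()
};

static void record_array_properties(uint8_t& mdp_flags,
                                    bool is_flat, bool is_null_free) {
  if (is_flat)      mdp_flags |= FLAT_ARRAY;       // set_mdp_flag_at(mdp, ...)
  if (is_null_free) mdp_flags |= NULL_FREE_ARRAY;  // flags only ever accumulate
}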
2149 void InterpreterMacroAssembler::profile_element(Register mdp,
2150                                                 Register element,
2151                                                 Register tmp) {
2152   if (ProfileInterpreter) {
2153     Label profile_continue;
2154 
2155     // If no method data exists, go to profile_continue.
2156     test_method_data_pointer(mdp, profile_continue);
2157 
2158     mov(tmp, element);
2159     profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadStoreData::element_offset())));
2160 
2161     // The method data pointer needs to be updated.
2162     update_mdp_by_constant(mdp, in_bytes(ArrayLoadStoreData::array_load_store_data_size()));
2163 
2164     bind(profile_continue);
2165   }
2166 }
2167 
2168 void InterpreterMacroAssembler::profile_acmp(Register mdp,
2169                                              Register left,
2170                                              Register right,
2171                                              Register tmp) {
2172   if (ProfileInterpreter) {
2173     Label profile_continue;
2174 
2175     // If no method data exists, go to profile_continue.
2176     test_method_data_pointer(mdp, profile_continue);
2177 
2178     mov(tmp, left);
2179     profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::left_offset())));
2180 
2181     Label left_not_inline_type;
2182     test_oop_is_not_inline_type(left, tmp, left_not_inline_type);
2183     set_mdp_flag_at(mdp, ACmpData::left_inline_type_byte_constant());
2184     bind(left_not_inline_type);
2185 
2186     mov(tmp, right);
2187     profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::right_offset())));
2188 
2189     Label right_not_inline_type;
2190     test_oop_is_not_inline_type(right, tmp, right_not_inline_type);
2191     set_mdp_flag_at(mdp, ACmpData::right_inline_type_byte_constant());
2192     bind(right_not_inline_type);
2193 
2194     bind(profile_continue);
2195   }
2196 }
2197 
2198 
2199 void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
2200   if (state == atos) {
2201     MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
2202   }
2203 }
2204 
2205 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
2206 #ifndef _LP64
2207   if ((state == ftos && UseSSE < 1) ||
2208       (state == dtos && UseSSE < 2)) {
2209     MacroAssembler::verify_FPU(stack_depth);
2210   }
2211 #endif
2212 }
2213 
2214 // Jump if ((*counter_addr += increment) & mask) == 0
2215 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, Address mask,
2216                                                         Register scratch, Label* where) {
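The body is elided in this hunk, but the contract in the comment above restates cleanly as a C++ sketch (here the "jump" is just the boolean result):

#include <cstdint>

static bool increment_mask_and_jump(int32_t* counter_addr,
                                    int32_t increment, int32_t mask) {
  *counter_addr += increment;          // *counter_addr += increment
  return (*counter_addr & mask) == 0;  // jump taken when the masked sum is zero
}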