src/hotspot/cpu/x86/interp_masm_x86.cpp

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compiler_globals.hpp"
  27 #include "interp_masm_x86.hpp"
  28 #include "interpreter/interpreter.hpp"
  29 #include "interpreter/interpreterRuntime.hpp"
  30 #include "logging/log.hpp"
  31 #include "oops/arrayOop.hpp"
  32 #include "oops/markWord.hpp"
  33 #include "oops/methodData.hpp"
  34 #include "oops/method.hpp"

  35 #include "prims/jvmtiExport.hpp"
  36 #include "prims/jvmtiThreadState.hpp"
  37 #include "runtime/basicLock.hpp"
  38 #include "runtime/frame.inline.hpp"
  39 #include "runtime/safepointMechanism.hpp"
  40 #include "runtime/sharedRuntime.hpp"
  41 #include "runtime/thread.inline.hpp"
  42 #include "utilities/powerOfTwo.hpp"
  43 
  44 // Implementation of InterpreterMacroAssembler
  45 
  46 void InterpreterMacroAssembler::jump_to_entry(address entry) {
  47   assert(entry, "Entry must have been generated by now");
  48   jump(RuntimeAddress(entry));
  49 }
  50 
  51 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  52   Label update, next, none;
  53 
  54   interp_verify_oop(obj, atos);

 134         Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
 135         profile_obj_type(tmp, mdo_arg_addr);
 136 
 137         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
 138         addptr(mdp, to_add);
 139         off_to_args += to_add;
 140       }
 141 
 142       if (MethodData::profile_return()) {
 143         movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
 144         subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
 145       }
 146 
 147       bind(done);
 148 
 149       if (MethodData::profile_return()) {
 150         // We're right after the type profile for the last
 151         // argument. tmp is the number of cells left in the
  152         // CallTypeData/VirtualCallTypeData to reach its end. Non-null
 153         // if there's a return to profile.
 154         assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
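              // tmp holds a cell count; shifting by log2(cell_size) converts it to a byte offset before it is added to mdp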
 155         shll(tmp, log2i_exact((int)DataLayout::cell_size));
 156         addptr(mdp, tmp);
 157       }
 158       movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp);
 159     } else {
 160       assert(MethodData::profile_return(), "either profile call args or call ret");
 161       update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
 162     }
 163 
 164     // mdp points right after the end of the
 165     // CallTypeData/VirtualCallTypeData, right after the cells for the
 166     // return value type if there's one
 167 
 168     bind(profile_continue);
 169   }
 170 }
 171 
 172 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
 173   assert_different_registers(mdp, ret, tmp, _bcp_register);
 174   if (ProfileInterpreter && MethodData::profile_return()) {

 179     if (MethodData::profile_return_jsr292_only()) {
 180       assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");
 181 
 182       // If we don't profile all invoke bytecodes we must make sure
 183       // it's a bytecode we indeed profile. We can't go back to the
  184       // beginning of the ProfileData we intend to update to check its
  185       // type because we're right after it and we don't know its
  186       // length
 187       Label do_profile;
 188       cmpb(Address(_bcp_register, 0), Bytecodes::_invokedynamic);
 189       jcc(Assembler::equal, do_profile);
 190       cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle);
 191       jcc(Assembler::equal, do_profile);
 192       get_method(tmp);
 193       cmpw(Address(tmp, Method::intrinsic_id_offset_in_bytes()), static_cast<int>(vmIntrinsics::_compiledLambdaForm));
 194       jcc(Assembler::notEqual, profile_continue);
 195 
 196       bind(do_profile);
 197     }
 198 
 199     Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
 200     mov(tmp, ret);
 201     profile_obj_type(tmp, mdo_ret_addr);
 202 
 203     bind(profile_continue);
 204   }
 205 }
 206 
 207 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
 208   if (ProfileInterpreter && MethodData::profile_parameters()) {
 209     Label profile_continue;
 210 
 211     test_method_data_pointer(mdp, profile_continue);
 212 
 213     // Load the offset of the area within the MDO used for
 214     // parameters. If it's negative we're not profiling any parameters
 215     movl(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
 216     testl(tmp1, tmp1);
 217     jcc(Assembler::negative, profile_continue);
 218 
 219     // Compute a pointer to the area for parameters from the offset

 539 
 540   const int method_offset = in_bytes(
 541     ConstantPoolCache::base_offset() +
 542       ((byte_no == TemplateTable::f2_byte)
 543        ? ConstantPoolCacheEntry::f2_offset()
 544        : ConstantPoolCacheEntry::f1_offset()));
 545 
 546   movptr(method, Address(cache, index, Address::times_ptr, method_offset)); // get f1 Method*
 547 }
 548 
 549 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
 550 // subtype of super_klass.
 551 //
 552 // Args:
 553 //      rax: superklass
 554 //      Rsub_klass: subklass
 555 //
 556 // Kills:
 557 //      rcx, rdi
 558 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
 559                                                   Label& ok_is_subtype) {

 560   assert(Rsub_klass != rax, "rax holds superklass");
 561   LP64_ONLY(assert(Rsub_klass != r14, "r14 holds locals");)
 562   LP64_ONLY(assert(Rsub_klass != r13, "r13 holds bcp");)
 563   assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
 564   assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");
 565 
 566   // Profile the not-null value's klass.
 567   profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi

 568 
 569   // Do the check.
 570   check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx
 571 
 572   // Profile the failure of the check.
 573   profile_typecheck_failed(rcx); // blows rcx

 574 }
 575 
 576 
 577 #ifndef _LP64
 578 void InterpreterMacroAssembler::f2ieee() {
 579   if (IEEEPrecision) {
 580     fstp_s(Address(rsp, 0));
 581     fld_s(Address(rsp, 0));
 582   }
 583 }
 584 
 585 
 586 void InterpreterMacroAssembler::d2ieee() {
 587   if (IEEEPrecision) {
 588     fstp_d(Address(rsp, 0));
 589     fld_d(Address(rsp, 0));
 590   }
 591 }
 592 #endif // _LP64
 593 

 999   // the stack, will call InterpreterRuntime::at_unwind.
1000   Label slow_path;
1001   Label fast_path;
1002   safepoint_poll(slow_path, rthread, true /* at_return */, false /* in_nmethod */);
1003   jmp(fast_path);
1004   bind(slow_path);
1005   push(state);
1006   set_last_Java_frame(rthread, noreg, rbp, (address)pc());
1007   super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), rthread);
1008   NOT_LP64(get_thread(rthread);) // call_VM clobbered it, restore
1009   reset_last_Java_frame(rthread, true);
1010   pop(state);
1011   bind(fast_path);
1012 
1013   // get the value of _do_not_unlock_if_synchronized into rbx
1014   const Address do_not_unlock_if_synchronized(rthread,
1015     in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
1016   movbool(rbx, do_not_unlock_if_synchronized);
1017   movbool(do_not_unlock_if_synchronized, false); // reset the flag
1018 
1019  // get method access flags
1020   movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
1021   movl(rcx, Address(rcx, Method::access_flags_offset()));
1022   testl(rcx, JVM_ACC_SYNCHRONIZED);
1023   jcc(Assembler::zero, unlocked);
1024 
1025   // Don't unlock anything if the _do_not_unlock_if_synchronized flag
1026   // is set.
1027   testbool(rbx);
1028   jcc(Assembler::notZero, no_unlock);
1029 
1030   // unlock monitor
1031   push(state); // save result
1032 
1033   // BasicObjectLock will be first in list, since this is a
1034   // synchronized method. However, need to check that the object has
1035   // not been unlocked by an explicit monitorexit bytecode.
1036   const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
1037                         wordSize - (int) sizeof(BasicObjectLock));
1038   // We use c_rarg1/rdx so that if we go slow path it will be the correct
1039   // register for unlock_object to pass to VM directly

1123     bind(loop);
1124     // check if current entry is used
1125     cmpptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL);
1126     jcc(Assembler::notEqual, exception);
1127 
1128     addptr(rmon, entry_size); // otherwise advance to next entry
1129     bind(entry);
1130     cmpptr(rmon, rbx); // check if bottom reached
1131     jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
1132   }
1133 
1134   bind(no_unlock);
1135 
1136   // jvmti support
1137   if (notify_jvmdi) {
1138     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
1139   } else {
1140     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
1141   }
1142 
1143   // remove activation
1144   // get sender sp
1145   movptr(rbx,
1146          Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
1147   if (StackReservedPages > 0) {

1148     // testing if reserved zone needs to be re-enabled
1149     Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
1150     Label no_reserved_zone_enabling;
1151 
1152     NOT_LP64(get_thread(rthread);)
1153 
1154     cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_enabled);
1155     jcc(Assembler::equal, no_reserved_zone_enabling);
1156 
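         // Re-enable only if the sender sp is above reserved_stack_activation,
         // i.e. we are unwinding past the frame that triggered use of the reserved zone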
1157     cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
1158     jcc(Assembler::lessEqual, no_reserved_zone_enabling);
1159 
1160     call_VM_leaf(
1161       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
1162     call_VM(noreg, CAST_FROM_FN_PTR(address,
1163                    InterpreterRuntime::throw_delayed_StackOverflowError));
1164     should_not_reach_here();
1165 
1166     bind(no_reserved_zone_enabling);
1167   }

1168   leave();                           // remove frame anchor
1169   pop(ret_addr);                     // get return address
1170   mov(rsp, rbx);                     // set sp to sender sp
1171 }
1172 
1173 void InterpreterMacroAssembler::get_method_counters(Register method,
1174                                                     Register mcs, Label& skip) {
1175   Label has_counters;
1176   movptr(mcs, Address(method, Method::method_counters_offset()));
1177   testptr(mcs, mcs);
1178   jcc(Assembler::notZero, has_counters);
1179   call_VM(noreg, CAST_FROM_FN_PTR(address,
1180           InterpreterRuntime::build_method_counters), method);
1181   movptr(mcs, Address(method,Method::method_counters_offset()));
1182   testptr(mcs, mcs);
1183   jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
1184   bind(has_counters);
1185 }
1186 
1187 
1188 // Lock object
1189 //
1190 // Args:
1191 //      rdx, c_rarg1: BasicObjectLock to be used for locking
1192 //
1193 // Kills:
1194 //      rax, rbx
1195 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
1196   assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
1197          "The argument is only for looks. It must be c_rarg1");
1198 
1199   if (UseHeavyMonitors) {
1200     call_VM(noreg,
1201             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1202             lock_reg);
1203   } else {
1204     Label done;
1205 
1206     const Register swap_reg = rax; // Must use rax for cmpxchg instruction

1213     const int mark_offset = lock_offset +
1214                             BasicLock::displaced_header_offset_in_bytes();
1215 
1216     Label slow_case;
1217 
1218     // Load object pointer into obj_reg
1219     movptr(obj_reg, Address(lock_reg, obj_offset));
1220 
1221     if (DiagnoseSyncOnValueBasedClasses != 0) {
1222       load_klass(tmp_reg, obj_reg, rklass_decode_tmp);
1223       movl(tmp_reg, Address(tmp_reg, Klass::access_flags_offset()));
1224       testl(tmp_reg, JVM_ACC_IS_VALUE_BASED_CLASS);
1225       jcc(Assembler::notZero, slow_case);
1226     }
1227 
1228     // Load immediate 1 into swap_reg %rax
1229     movl(swap_reg, (int32_t)1);
1230 
1231     // Load (object->mark() | 1) into swap_reg %rax
1232     orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

1233 
1234     // Save (object->mark() | 1) into BasicLock's displaced header
1235     movptr(Address(lock_reg, mark_offset), swap_reg);
1236 
1237     assert(lock_offset == 0,
1238            "displaced header must be first word in BasicObjectLock");
1239 
1240     lock();
1241     cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
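         // ZF is set iff the mark word still held the unlocked value in rax;
         // on success the lock record address has been installed in the header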
1242     jcc(Assembler::zero, done);
1243 
1244     const int zero_bits = LP64_ONLY(7) NOT_LP64(3);
1245 
1246     // Fast check for recursive lock.
1247     //
1248     // Can apply the optimization only if this is a stack lock
1249     // allocated in this thread. For efficiency, we can focus on
1250     // recently allocated stack locks (instead of reading the stack
1251     // base and checking whether 'mark' points inside the current
1252     // thread stack):

1551     test_method_data_pointer(mdp, profile_continue);
1552 
1553     // We are taking a branch.  Increment the taken count.
1554     // We inline increment_mdp_data_at to return bumped_count in a register
1555     //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
1556     Address data(mdp, in_bytes(JumpData::taken_offset()));
1557     movptr(bumped_count, data);
1558     assert(DataLayout::counter_increment == 1,
1559             "flow-free idiom only works with 1");
1560     addptr(bumped_count, DataLayout::counter_increment);
1561     sbbptr(bumped_count, 0);
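         // If the add wrapped the counter from all-ones to zero, CF is set and the
         // sbb brings it back to all-ones, saturating the count without a branch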
1562     movptr(data, bumped_count); // Store back out
1563 
1564     // The method data pointer needs to be updated to reflect the new target.
1565     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1566     bind(profile_continue);
1567   }
1568 }
1569 
1570 
1571 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
1572   if (ProfileInterpreter) {
1573     Label profile_continue;
1574 
1575     // If no method data exists, go to profile_continue.
1576     test_method_data_pointer(mdp, profile_continue);
1577 
1578     // We are taking a branch.  Increment the not taken count.
1579     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
1580 
1581     // The method data pointer needs to be updated to correspond to
1582     // the next bytecode
1583     update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
1584     bind(profile_continue);
1585   }
1586 }
1587 
1588 void InterpreterMacroAssembler::profile_call(Register mdp) {
1589   if (ProfileInterpreter) {
1590     Label profile_continue;
1591 
1592     // If no method data exists, go to profile_continue.
1593     test_method_data_pointer(mdp, profile_continue);
1594 
1595     // We are making a call.  Increment the count.
1596     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1597 
1598     // The method data pointer needs to be updated to reflect the new target.
1599     update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1600     bind(profile_continue);
1601   }
1602 }
1603 

1938     // case_array_offset_in_bytes()
1939     movl(reg2, in_bytes(MultiBranchData::per_case_size()));
1940     imulptr(index, reg2); // XXX l ?
1941     addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?
1942 
1943     // Update the case count
1944     increment_mdp_data_at(mdp,
1945                           index,
1946                           in_bytes(MultiBranchData::relative_count_offset()));
1947 
1948     // The method data pointer needs to be updated.
1949     update_mdp_by_offset(mdp,
1950                          index,
1951                          in_bytes(MultiBranchData::
1952                                   relative_displacement_offset()));
1953 
1954     bind(profile_continue);
1955   }
1956 }
1957 

1958 
1959 
1960 void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
1961   if (state == atos) {
1962     MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
1963   }
1964 }
1965 
1966 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
1967 #ifndef _LP64
1968   if ((state == ftos && UseSSE < 1) ||
1969       (state == dtos && UseSSE < 2)) {
1970     MacroAssembler::verify_FPU(stack_depth);
1971   }
1972 #endif
1973 }
1974 
1975 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
1976 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
1977                                                         int increment, Address mask,

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compiler_globals.hpp"
  27 #include "interp_masm_x86.hpp"
  28 #include "interpreter/interpreter.hpp"
  29 #include "interpreter/interpreterRuntime.hpp"
  30 #include "logging/log.hpp"
  31 #include "oops/arrayOop.hpp"
  32 #include "oops/markWord.hpp"
  33 #include "oops/methodData.hpp"
  34 #include "oops/method.hpp"
  35 #include "oops/inlineKlass.hpp"
  36 #include "prims/jvmtiExport.hpp"
  37 #include "prims/jvmtiThreadState.hpp"
  38 #include "runtime/basicLock.hpp"
  39 #include "runtime/frame.inline.hpp"
  40 #include "runtime/safepointMechanism.hpp"
  41 #include "runtime/sharedRuntime.hpp"
  42 #include "runtime/thread.inline.hpp"
  43 #include "utilities/powerOfTwo.hpp"
  44 
  45 // Implementation of InterpreterMacroAssembler
  46 
  47 void InterpreterMacroAssembler::jump_to_entry(address entry) {
  48   assert(entry, "Entry must have been generated by now");
  49   jump(RuntimeAddress(entry));
  50 }
  51 
  52 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  53   Label update, next, none;
  54 
  55   interp_verify_oop(obj, atos);

 135         Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
 136         profile_obj_type(tmp, mdo_arg_addr);
 137 
 138         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
 139         addptr(mdp, to_add);
 140         off_to_args += to_add;
 141       }
 142 
 143       if (MethodData::profile_return()) {
 144         movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
 145         subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
 146       }
 147 
 148       bind(done);
 149 
 150       if (MethodData::profile_return()) {
 151         // We're right after the type profile for the last
 152         // argument. tmp is the number of cells left in the
  153         // CallTypeData/VirtualCallTypeData to reach its end. Non-null
 154         // if there's a return to profile.
 155         assert(SingleTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
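              // tmp holds a cell count; shifting by log2(cell_size) converts it to a byte offset before it is added to mdp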
 156         shll(tmp, log2i_exact((int)DataLayout::cell_size));
 157         addptr(mdp, tmp);
 158       }
 159       movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp);
 160     } else {
 161       assert(MethodData::profile_return(), "either profile call args or call ret");
 162       update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
 163     }
 164 
 165     // mdp points right after the end of the
 166     // CallTypeData/VirtualCallTypeData, right after the cells for the
 167     // return value type if there's one
 168 
 169     bind(profile_continue);
 170   }
 171 }
 172 
 173 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
 174   assert_different_registers(mdp, ret, tmp, _bcp_register);
 175   if (ProfileInterpreter && MethodData::profile_return()) {

 180     if (MethodData::profile_return_jsr292_only()) {
 181       assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");
 182 
 183       // If we don't profile all invoke bytecodes we must make sure
 184       // it's a bytecode we indeed profile. We can't go back to the
  185       // beginning of the ProfileData we intend to update to check its
  186       // type because we're right after it and we don't know its
  187       // length
 188       Label do_profile;
 189       cmpb(Address(_bcp_register, 0), Bytecodes::_invokedynamic);
 190       jcc(Assembler::equal, do_profile);
 191       cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle);
 192       jcc(Assembler::equal, do_profile);
 193       get_method(tmp);
 194       cmpw(Address(tmp, Method::intrinsic_id_offset_in_bytes()), static_cast<int>(vmIntrinsics::_compiledLambdaForm));
 195       jcc(Assembler::notEqual, profile_continue);
 196 
 197       bind(do_profile);
 198     }
 199 
 200     Address mdo_ret_addr(mdp, -in_bytes(SingleTypeEntry::size()));
 201     mov(tmp, ret);
 202     profile_obj_type(tmp, mdo_ret_addr);
 203 
 204     bind(profile_continue);
 205   }
 206 }
 207 
 208 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
 209   if (ProfileInterpreter && MethodData::profile_parameters()) {
 210     Label profile_continue;
 211 
 212     test_method_data_pointer(mdp, profile_continue);
 213 
 214     // Load the offset of the area within the MDO used for
 215     // parameters. If it's negative we're not profiling any parameters
 216     movl(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
 217     testl(tmp1, tmp1);
 218     jcc(Assembler::negative, profile_continue);
 219 
 220     // Compute a pointer to the area for parameters from the offset

 540 
 541   const int method_offset = in_bytes(
 542     ConstantPoolCache::base_offset() +
 543       ((byte_no == TemplateTable::f2_byte)
 544        ? ConstantPoolCacheEntry::f2_offset()
 545        : ConstantPoolCacheEntry::f1_offset()));
 546 
 547   movptr(method, Address(cache, index, Address::times_ptr, method_offset)); // get f1 Method*
 548 }
 549 
 550 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
 551 // subtype of super_klass.
 552 //
 553 // Args:
 554 //      rax: superklass
 555 //      Rsub_klass: subklass
 556 //
 557 // Kills:
 558 //      rcx, rdi
 559 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
 560                                                   Label& ok_is_subtype,
 561                                                   bool profile) {
 562   assert(Rsub_klass != rax, "rax holds superklass");
 563   LP64_ONLY(assert(Rsub_klass != r14, "r14 holds locals");)
 564   LP64_ONLY(assert(Rsub_klass != r13, "r13 holds bcp");)
 565   assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
 566   assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");
 567 
 568   // Profile the not-null value's klass.
 569   if (profile) {
 570     profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi
 571   }
 572 
 573   // Do the check.
 574   check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx
 575 
 576   // Profile the failure of the check.
 577   if (profile) {
 578     profile_typecheck_failed(rcx); // blows rcx
 579   }
 580 }
 581 
 582 
 583 #ifndef _LP64
 584 void InterpreterMacroAssembler::f2ieee() {
 585   if (IEEEPrecision) {
 586     fstp_s(Address(rsp, 0));
 587     fld_s(Address(rsp, 0));
 588   }
 589 }
 590 
 591 
 592 void InterpreterMacroAssembler::d2ieee() {
 593   if (IEEEPrecision) {
 594     fstp_d(Address(rsp, 0));
 595     fld_d(Address(rsp, 0));
 596   }
 597 }
 598 #endif // _LP64
 599 

1005   // the stack, will call InterpreterRuntime::at_unwind.
1006   Label slow_path;
1007   Label fast_path;
1008   safepoint_poll(slow_path, rthread, true /* at_return */, false /* in_nmethod */);
1009   jmp(fast_path);
1010   bind(slow_path);
1011   push(state);
1012   set_last_Java_frame(rthread, noreg, rbp, (address)pc());
1013   super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), rthread);
1014   NOT_LP64(get_thread(rthread);) // call_VM clobbered it, restore
1015   reset_last_Java_frame(rthread, true);
1016   pop(state);
1017   bind(fast_path);
1018 
1019   // get the value of _do_not_unlock_if_synchronized into rbx
1020   const Address do_not_unlock_if_synchronized(rthread,
1021     in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
1022   movbool(rbx, do_not_unlock_if_synchronized);
1023   movbool(do_not_unlock_if_synchronized, false); // reset the flag
1024 
1025   // get method access flags
1026   movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
1027   movl(rcx, Address(rcx, Method::access_flags_offset()));
1028   testl(rcx, JVM_ACC_SYNCHRONIZED);
1029   jcc(Assembler::zero, unlocked);
1030 
1031   // Don't unlock anything if the _do_not_unlock_if_synchronized flag
1032   // is set.
1033   testbool(rbx);
1034   jcc(Assembler::notZero, no_unlock);
1035 
1036   // unlock monitor
1037   push(state); // save result
1038 
1039   // BasicObjectLock will be first in list, since this is a
1040   // synchronized method. However, need to check that the object has
1041   // not been unlocked by an explicit monitorexit bytecode.
1042   const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
1043                         wordSize - (int) sizeof(BasicObjectLock));
1044   // We use c_rarg1/rdx so that if we go slow path it will be the correct
1045   // register for unlock_object to pass to VM directly

1129     bind(loop);
1130     // check if current entry is used
1131     cmpptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL);
1132     jcc(Assembler::notEqual, exception);
1133 
1134     addptr(rmon, entry_size); // otherwise advance to next entry
1135     bind(entry);
1136     cmpptr(rmon, rbx); // check if bottom reached
1137     jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
1138   }
1139 
1140   bind(no_unlock);
1141 
1142   // jvmti support
1143   if (notify_jvmdi) {
1144     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
1145   } else {
1146     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
1147   }
1148 

1149   if (StackReservedPages > 0) {
1150     movptr(rbx,
1151                Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
1152     // testing if reserved zone needs to be re-enabled
1153     Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
1154     Label no_reserved_zone_enabling;
1155 
1156     NOT_LP64(get_thread(rthread);)
1157 
1158     cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_enabled);
1159     jcc(Assembler::equal, no_reserved_zone_enabling);
1160 
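         // Re-enable only if the sender sp is above reserved_stack_activation,
         // i.e. we are unwinding past the frame that triggered use of the reserved zone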
1161     cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
1162     jcc(Assembler::lessEqual, no_reserved_zone_enabling);
1163 
1164     call_VM_leaf(
1165       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
1166     call_VM(noreg, CAST_FROM_FN_PTR(address,
1167                    InterpreterRuntime::throw_delayed_StackOverflowError));
1168     should_not_reach_here();
1169 
1170     bind(no_reserved_zone_enabling);
1171   }
1172 
1173   // remove activation
1174   // get sender sp
1175   movptr(rbx,
1176          Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
1177 
1178   if (state == atos && InlineTypeReturnedAsFields) {
1179     Label skip;
1180     // Test if the return type is an inline type
1181     movptr(rdi, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
1182     movptr(rdi, Address(rdi, Method::const_offset()));
1183     load_unsigned_byte(rdi, Address(rdi, ConstMethod::result_type_offset()));
1184     cmpl(rdi, T_INLINE_TYPE);
1185     jcc(Assembler::notEqual, skip);
1186 
1187     // We are returning an inline type, load its fields into registers
1188 #ifndef _LP64
1189     super_call_VM_leaf(StubRoutines::load_inline_type_fields_in_regs());
1190 #else
1191     // Load fields from a buffered value with an inline class specific handler
1192     Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
1193     load_klass(rdi, rax, tmp_load_klass);
1194     movptr(rdi, Address(rdi, InstanceKlass::adr_inlineklass_fixed_block_offset()));
1195     movptr(rdi, Address(rdi, InlineKlass::unpack_handler_offset()));
1196 
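         // If the klass provides no unpack handler, leave the buffered oop in rax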
1197     testptr(rdi, rdi);
1198     jcc(Assembler::equal, skip);
1199 
1200     call(rdi);
1201 #endif
1202     // call above kills the value in rbx. Reload it.
1203     movptr(rbx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
1204     bind(skip);
1205   }
1206   leave();                           // remove frame anchor
1207   pop(ret_addr);                     // get return address
1208   mov(rsp, rbx);                     // set sp to sender sp
1209 }
1210 
1211 void InterpreterMacroAssembler::get_method_counters(Register method,
1212                                                     Register mcs, Label& skip) {
1213   Label has_counters;
1214   movptr(mcs, Address(method, Method::method_counters_offset()));
1215   testptr(mcs, mcs);
1216   jcc(Assembler::notZero, has_counters);
1217   call_VM(noreg, CAST_FROM_FN_PTR(address,
1218           InterpreterRuntime::build_method_counters), method);
1219   movptr(mcs, Address(method,Method::method_counters_offset()));
1220   testptr(mcs, mcs);
1221   jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
1222   bind(has_counters);
1223 }
1224 
1225 void InterpreterMacroAssembler::allocate_instance(Register klass, Register new_obj,
1226                                                   Register t1, Register t2,
1227                                                   bool clear_fields, Label& alloc_failed) {
1228   MacroAssembler::allocate_instance(klass, new_obj, t1, t2, clear_fields, alloc_failed);
1229   {
1230     SkipIfEqual skip_if(this, &DTraceAllocProbes, 0);
1231     // Trigger dtrace event for fastpath
1232     push(atos);
1233     call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), new_obj);
1234     pop(atos);
1235   }
1236 }
1237 
1238 
1239 void InterpreterMacroAssembler::read_inlined_field(Register holder_klass,
1240                                                      Register field_index, Register field_offset,
1241                                                      Register obj) {
1242   Label alloc_failed, empty_value, done;
1243   const Register src = field_offset;
1244   const Register alloc_temp = LP64_ONLY(rscratch1) NOT_LP64(rsi);
1245   const Register dst_temp   = LP64_ONLY(rscratch2) NOT_LP64(rdi);
1246   assert_different_registers(obj, holder_klass, field_index, field_offset, dst_temp);
1247 
1248   // Grab the inline field klass
1249   push(holder_klass);
1250   const Register field_klass = holder_klass;
1251   get_inline_type_field_klass(holder_klass, field_index, field_klass);
1252 
1253   // check for empty value klass
1254   test_klass_is_empty_inline_type(field_klass, dst_temp, empty_value);
1255 
1256   // allocate buffer
1257   push(obj); // save holder
1258   allocate_instance(field_klass, obj, alloc_temp, dst_temp, false, alloc_failed);
1259 
1260   // Have an oop instance buffer, copy into it
1261   data_for_oop(obj, dst_temp, field_klass);
1262   pop(alloc_temp);             // restore holder
1263   lea(src, Address(alloc_temp, field_offset));
1264   // the copy may go through call_VM_leaf and clobber registers; save and restore the new obj
1265   push(obj);
1266   access_value_copy(IS_DEST_UNINITIALIZED, src, dst_temp, field_klass);
1267   pop(obj);
1268   pop(holder_klass);
1269   jmp(done);
1270 
1271   bind(empty_value);
1272   get_empty_inline_type_oop(field_klass, dst_temp, obj);
1273   pop(holder_klass);
1274   jmp(done);
1275 
1276   bind(alloc_failed);
1277   pop(obj);
1278   pop(holder_klass);
1279   call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_inlined_field),
1280           obj, field_index, holder_klass);
1281 
1282   bind(done);
1283 }
1284 
1285 void InterpreterMacroAssembler::read_flattened_element(Register array, Register index,
1286                                                        Register t1, Register t2,
1287                                                        Register obj) {
1288   assert_different_registers(array, index, t1, t2);
1289   Label alloc_failed, empty_value, done;
1290   const Register array_klass = t2;
1291   const Register elem_klass = t1;
1292   const Register alloc_temp = LP64_ONLY(rscratch1) NOT_LP64(rsi);
1293   const Register dst_temp   = LP64_ONLY(rscratch2) NOT_LP64(rdi);
1294 
1295   // load in array->klass()->element_klass()
1296   Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
1297   load_klass(array_klass, array, tmp_load_klass);
1298   movptr(elem_klass, Address(array_klass, ArrayKlass::element_klass_offset()));
1299 
1300   // check for empty value klass
1301   test_klass_is_empty_inline_type(elem_klass, dst_temp, empty_value);
1302 
1303   // calc source into "array_klass" and free up some regs
1304   const Register src = array_klass;
1305   push(index); // preserve index reg in case alloc_failed
1306   data_for_value_array_index(array, array_klass, index, src);
1307 
1308   allocate_instance(elem_klass, obj, alloc_temp, dst_temp, false, alloc_failed);
1309   // Have an oop instance buffer, copy into it
1310   store_ptr(0, obj); // preserve obj (overwrite index, no longer needed)
1311   data_for_oop(obj, dst_temp, elem_klass);
1312   access_value_copy(IS_DEST_UNINITIALIZED, src, dst_temp, elem_klass);
1313   pop(obj);
1314   jmp(done);
1315 
1316   bind(empty_value);
1317   get_empty_inline_type_oop(elem_klass, dst_temp, obj);
1318   jmp(done);
1319 
1320   bind(alloc_failed);
1321   pop(index);
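         // call_VM passes its arguments in c_rarg1/c_rarg2; if array lives in c_rarg2
         // it would be clobbered when index is set up, so move it to a free register first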
1322   if (array == c_rarg2) {
1323     mov(elem_klass, array);
1324     array = elem_klass;
1325   }
1326   call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_load), array, index);
1327 
1328   bind(done);
1329 }
1330 
1331 
1332 // Lock object
1333 //
1334 // Args:
1335 //      rdx, c_rarg1: BasicObjectLock to be used for locking
1336 //
1337 // Kills:
1338 //      rax, rbx
1339 void InterpreterMacroAssembler::lock_object(Register lock_reg) {
1340   assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
1341          "The argument is only for looks. It must be c_rarg1");
1342 
1343   if (UseHeavyMonitors) {
1344     call_VM(noreg,
1345             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1346             lock_reg);
1347   } else {
1348     Label done;
1349 
1350     const Register swap_reg = rax; // Must use rax for cmpxchg instruction

1357     const int mark_offset = lock_offset +
1358                             BasicLock::displaced_header_offset_in_bytes();
1359 
1360     Label slow_case;
1361 
1362     // Load object pointer into obj_reg
1363     movptr(obj_reg, Address(lock_reg, obj_offset));
1364 
1365     if (DiagnoseSyncOnValueBasedClasses != 0) {
1366       load_klass(tmp_reg, obj_reg, rklass_decode_tmp);
1367       movl(tmp_reg, Address(tmp_reg, Klass::access_flags_offset()));
1368       testl(tmp_reg, JVM_ACC_IS_VALUE_BASED_CLASS);
1369       jcc(Assembler::notZero, slow_case);
1370     }
1371 
1372     // Load immediate 1 into swap_reg %rax
1373     movl(swap_reg, (int32_t)1);
1374 
1375     // Load (object->mark() | 1) into swap_reg %rax
1376     orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1377     if (EnableValhalla) {
1378       // Mask inline_type bit such that we go to the slow path if object is an inline type
1379       andptr(swap_reg, ~((int) markWord::inline_type_bit_in_place));
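           // With the bit cleared, swap_reg cannot match an inline type's mark word,
           // so the CAS below fails and locking falls through to the slow path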
1380     }
1381 
1382     // Save (object->mark() | 1) into BasicLock's displaced header
1383     movptr(Address(lock_reg, mark_offset), swap_reg);
1384 
1385     assert(lock_offset == 0,
1386            "displaced header must be first word in BasicObjectLock");
1387 
1388     lock();
1389     cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
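         // ZF is set iff the mark word still held the unlocked value in rax;
         // on success the lock record address has been installed in the header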
1390     jcc(Assembler::zero, done);
1391 
1392     const int zero_bits = LP64_ONLY(7) NOT_LP64(3);
1393 
1394     // Fast check for recursive lock.
1395     //
1396     // Can apply the optimization only if this is a stack lock
1397     // allocated in this thread. For efficiency, we can focus on
1398     // recently allocated stack locks (instead of reading the stack
1399     // base and checking whether 'mark' points inside the current
1400     // thread stack):

1699     test_method_data_pointer(mdp, profile_continue);
1700 
1701     // We are taking a branch.  Increment the taken count.
1702     // We inline increment_mdp_data_at to return bumped_count in a register
1703     //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
1704     Address data(mdp, in_bytes(JumpData::taken_offset()));
1705     movptr(bumped_count, data);
1706     assert(DataLayout::counter_increment == 1,
1707             "flow-free idiom only works with 1");
1708     addptr(bumped_count, DataLayout::counter_increment);
1709     sbbptr(bumped_count, 0);
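         // If the add wrapped the counter from all-ones to zero, CF is set and the
         // sbb brings it back to all-ones, saturating the count without a branch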
1710     movptr(data, bumped_count); // Store back out
1711 
1712     // The method data pointer needs to be updated to reflect the new target.
1713     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1714     bind(profile_continue);
1715   }
1716 }
1717 
1718 
1719 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp, bool acmp) {
1720   if (ProfileInterpreter) {
1721     Label profile_continue;
1722 
1723     // If no method data exists, go to profile_continue.
1724     test_method_data_pointer(mdp, profile_continue);
1725 
1726     // We are taking a branch.  Increment the not taken count.
1727     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
1728 
1729     // The method data pointer needs to be updated to correspond to
1730     // the next bytecode
1731     update_mdp_by_constant(mdp, acmp ? in_bytes(ACmpData::acmp_data_size()) : in_bytes(BranchData::branch_data_size()));
1732     bind(profile_continue);
1733   }
1734 }
1735 
1736 void InterpreterMacroAssembler::profile_call(Register mdp) {
1737   if (ProfileInterpreter) {
1738     Label profile_continue;
1739 
1740     // If no method data exists, go to profile_continue.
1741     test_method_data_pointer(mdp, profile_continue);
1742 
1743     // We are making a call.  Increment the count.
1744     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1745 
1746     // The method data pointer needs to be updated to reflect the new target.
1747     update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1748     bind(profile_continue);
1749   }
1750 }
1751 

2086     // case_array_offset_in_bytes()
2087     movl(reg2, in_bytes(MultiBranchData::per_case_size()));
2088     imulptr(index, reg2); // XXX l ?
2089     addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?
2090 
2091     // Update the case count
2092     increment_mdp_data_at(mdp,
2093                           index,
2094                           in_bytes(MultiBranchData::relative_count_offset()));
2095 
2096     // The method data pointer needs to be updated.
2097     update_mdp_by_offset(mdp,
2098                          index,
2099                          in_bytes(MultiBranchData::
2100                                   relative_displacement_offset()));
2101 
2102     bind(profile_continue);
2103   }
2104 }
2105 
2106 void InterpreterMacroAssembler::profile_array(Register mdp,
2107                                               Register array,
2108                                               Register tmp) {
2109   if (ProfileInterpreter) {
2110     Label profile_continue;
2111 
2112     // If no method data exists, go to profile_continue.
2113     test_method_data_pointer(mdp, profile_continue);
2114 
2115     mov(tmp, array);
2116     profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadStoreData::array_offset())));
2117 
2118     Label not_flat;
2119     test_non_flattened_array_oop(array, tmp, not_flat);
2120 
2121     set_mdp_flag_at(mdp, ArrayLoadStoreData::flat_array_byte_constant());
2122 
2123     bind(not_flat);
2124 
2125     Label not_null_free;
2126     test_non_null_free_array_oop(array, tmp, not_null_free);
2127 
2128     set_mdp_flag_at(mdp, ArrayLoadStoreData::null_free_array_byte_constant());
2129 
2130     bind(not_null_free);
2131 
2132     bind(profile_continue);
2133   }
2134 }
2135 
2136 void InterpreterMacroAssembler::profile_element(Register mdp,
2137                                                 Register element,
2138                                                 Register tmp) {
2139   if (ProfileInterpreter) {
2140     Label profile_continue;
2141 
2142     // If no method data exists, go to profile_continue.
2143     test_method_data_pointer(mdp, profile_continue);
2144 
2145     mov(tmp, element);
2146     profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadStoreData::element_offset())));
2147 
2148     // The method data pointer needs to be updated.
2149     update_mdp_by_constant(mdp, in_bytes(ArrayLoadStoreData::array_load_store_data_size()));
2150 
2151     bind(profile_continue);
2152   }
2153 }
2154 
2155 void InterpreterMacroAssembler::profile_acmp(Register mdp,
2156                                              Register left,
2157                                              Register right,
2158                                              Register tmp) {
2159   if (ProfileInterpreter) {
2160     Label profile_continue;
2161 
2162     // If no method data exists, go to profile_continue.
2163     test_method_data_pointer(mdp, profile_continue);
2164 
2165     mov(tmp, left);
2166     profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::left_offset())));
2167 
2168     Label left_not_inline_type;
2169     test_oop_is_not_inline_type(left, tmp, left_not_inline_type);
2170     set_mdp_flag_at(mdp, ACmpData::left_inline_type_byte_constant());
2171     bind(left_not_inline_type);
2172 
2173     mov(tmp, right);
2174     profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::right_offset())));
2175 
2176     Label right_not_inline_type;
2177     test_oop_is_not_inline_type(right, tmp, right_not_inline_type);
2178     set_mdp_flag_at(mdp, ACmpData::right_inline_type_byte_constant());
2179     bind(right_not_inline_type);
2180 
2181     bind(profile_continue);
2182   }
2183 }
2184 
2185 
2186 void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
2187   if (state == atos) {
2188     MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
2189   }
2190 }
2191 
2192 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
2193 #ifndef _LP64
2194   if ((state == ftos && UseSSE < 1) ||
2195       (state == dtos && UseSSE < 2)) {
2196     MacroAssembler::verify_FPU(stack_depth);
2197   }
2198 #endif
2199 }
2200 
2201 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
2202 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
2203                                                         int increment, Address mask,