< prev index next >

src/hotspot/cpu/x86/interp_masm_x86.cpp

Print this page




  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "interp_masm_x86.hpp"
  27 #include "interpreter/interpreter.hpp"
  28 #include "interpreter/interpreterRuntime.hpp"
  29 #include "logging/log.hpp"
  30 #include "oops/arrayOop.hpp"
  31 #include "oops/markOop.hpp"
  32 #include "oops/methodData.hpp"
  33 #include "oops/method.hpp"

  34 #include "prims/jvmtiExport.hpp"
  35 #include "prims/jvmtiThreadState.hpp"
  36 #include "runtime/basicLock.hpp"
  37 #include "runtime/biasedLocking.hpp"
  38 #include "runtime/frame.inline.hpp"
  39 #include "runtime/safepointMechanism.hpp"
  40 #include "runtime/sharedRuntime.hpp"
  41 #include "runtime/thread.inline.hpp"
  42 
  43 // Implementation of InterpreterMacroAssembler
  44 
// Emit an unconditional jump to a previously generated interpreter entry
// point. `entry` must be non-NULL (i.e. the target stub has already been
// generated); wrapping it in RuntimeAddress lets the assembler pick a
// reachable encoding (rip-relative or via scratch) on x86_64.
void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  jump(RuntimeAddress(entry));
}
  49 
  50 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  51   Label update, next, none;
  52 
  53   verify_oop(obj);


 976         bool throw_monitor_exception,
 977         bool install_monitor_exception,
 978         bool notify_jvmdi) {
 979   // Note: registers rdx and xmm0 may hold the method result
 980   // if this is a synchronized method, so they must be preserved here.
 981   Label unlocked, unlock, no_unlock;
 982 
 983   const Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
 984   const Register robj    = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
 985   const Register rmon    = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
 986                               // monitor pointers need different register
 987                               // because rdx may have the result in it
 988   NOT_LP64(get_thread(rcx);)
 989 
 990   // get the value of _do_not_unlock_if_synchronized into rdx
 991   const Address do_not_unlock_if_synchronized(rthread,
 992     in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
 993   movbool(rbx, do_not_unlock_if_synchronized);
 994   movbool(do_not_unlock_if_synchronized, false); // reset the flag
 995 
 996  // get method access flags
 997   movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
 998   movl(rcx, Address(rcx, Method::access_flags_offset()));
 999   testl(rcx, JVM_ACC_SYNCHRONIZED);
1000   jcc(Assembler::zero, unlocked);
1001 
1002   // Don't unlock anything if the _do_not_unlock_if_synchronized flag
1003   // is set.
1004   testbool(rbx);
1005   jcc(Assembler::notZero, no_unlock);
1006 
1007   // unlock monitor
1008   push(state); // save result
1009 
1010   // BasicObjectLock will be first in list, since this is a
1011   // synchronized method. However, need to check that the object has
1012   // not been unlocked by an explicit monitorexit bytecode.
1013   const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
1014                         wordSize - (int) sizeof(BasicObjectLock));
1015   // We use c_rarg1/rdx so that if we go slow path it will be the correct
1016   // register for unlock_object to pass to VM directly


1100     bind(loop);
1101     // check if current entry is used
1102     cmpptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL);
1103     jcc(Assembler::notEqual, exception);
1104 
1105     addptr(rmon, entry_size); // otherwise advance to next entry
1106     bind(entry);
1107     cmpptr(rmon, rbx); // check if bottom reached
1108     jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
1109   }
1110 
1111   bind(no_unlock);
1112 
1113   // jvmti support
1114   if (notify_jvmdi) {
1115     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
1116   } else {
1117     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
1118   }
1119 
1120   // remove activation
1121   // get sender sp
1122   movptr(rbx,
1123          Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
1124   if (StackReservedPages > 0) {


1125     // testing if reserved zone needs to be re-enabled
1126     Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
1127     Label no_reserved_zone_enabling;
1128 
1129     NOT_LP64(get_thread(rthread);)
1130 
1131     cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_enabled);
1132     jcc(Assembler::equal, no_reserved_zone_enabling);
1133 
1134     cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
1135     jcc(Assembler::lessEqual, no_reserved_zone_enabling);
1136 
1137     call_VM_leaf(
1138       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
1139     call_VM(noreg, CAST_FROM_FN_PTR(address,
1140                    InterpreterRuntime::throw_delayed_StackOverflowError));
1141     should_not_reach_here();
1142 
1143     bind(no_reserved_zone_enabling);
1144   }

































1145   leave();                           // remove frame anchor
1146   pop(ret_addr);                     // get return address
1147   mov(rsp, rbx);                     // set sp to sender sp
1148 }
1149 
// Load the MethodCounters* of `method` into `mcs`, lazily allocating the
// counters via the runtime on first use. On allocation failure (OutOfMemory)
// control transfers to `skip` with mcs == NULL so the caller can bypass any
// counter updates. `method` is preserved; `mcs` is clobbered.
void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::notZero, has_counters);
  // Counters not yet allocated: call into the runtime to build them.
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  // Reload the pointer; the runtime call may have failed to allocate.
  movptr(mcs, Address(method,Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}
1163 
1164 


1187 
1188     const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
1189     const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
1190     const int mark_offset = lock_offset +
1191                             BasicLock::displaced_header_offset_in_bytes();
1192 
1193     Label slow_case;
1194 
1195     // Load object pointer into obj_reg
1196     movptr(obj_reg, Address(lock_reg, obj_offset));
1197 
1198     if (UseBiasedLocking) {
1199       biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, false, done, &slow_case);
1200     }
1201 
1202     // Load immediate 1 into swap_reg %rax
1203     movl(swap_reg, (int32_t)1);
1204 
1205     // Load (object->mark() | 1) into swap_reg %rax
1206     orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));




1207 
1208     // Save (object->mark() | 1) into BasicLock's displaced header
1209     movptr(Address(lock_reg, mark_offset), swap_reg);
1210 
1211     assert(lock_offset == 0,
1212            "displaced header must be first word in BasicObjectLock");
1213 
1214     lock();
1215     cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1216     if (PrintBiasedLockingStatistics) {
1217       cond_inc32(Assembler::zero,
1218                  ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
1219     }
1220     jcc(Assembler::zero, done);
1221 
1222     const int zero_bits = LP64_ONLY(7) NOT_LP64(3);
1223 
1224     // Test if the oopMark is an obvious stack pointer, i.e.,
1225     //  1) (mark & zero_bits) == 0, and
1226     //  2) rsp <= mark < mark + os::pagesize()




  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "interp_masm_x86.hpp"
  27 #include "interpreter/interpreter.hpp"
  28 #include "interpreter/interpreterRuntime.hpp"
  29 #include "logging/log.hpp"
  30 #include "oops/arrayOop.hpp"
  31 #include "oops/markOop.hpp"
  32 #include "oops/methodData.hpp"
  33 #include "oops/method.hpp"
  34 #include "oops/valueKlass.hpp"
  35 #include "prims/jvmtiExport.hpp"
  36 #include "prims/jvmtiThreadState.hpp"
  37 #include "runtime/basicLock.hpp"
  38 #include "runtime/biasedLocking.hpp"
  39 #include "runtime/frame.inline.hpp"
  40 #include "runtime/safepointMechanism.hpp"
  41 #include "runtime/sharedRuntime.hpp"
  42 #include "runtime/thread.inline.hpp"
  43 
  44 // Implementation of InterpreterMacroAssembler
  45 
// Emit an unconditional jump to a previously generated interpreter entry
// point. `entry` must be non-NULL (i.e. the target stub has already been
// generated); wrapping it in RuntimeAddress lets the assembler pick a
// reachable encoding (rip-relative or via scratch) on x86_64.
void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  jump(RuntimeAddress(entry));
}
  50 
  51 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  52   Label update, next, none;
  53 
  54   verify_oop(obj);


 977         bool throw_monitor_exception,
 978         bool install_monitor_exception,
 979         bool notify_jvmdi) {
 980   // Note: registers rdx and xmm0 may hold the method result
 981   // if this is a synchronized method, so they must be preserved here.
 982   Label unlocked, unlock, no_unlock;
 983 
 984   const Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
 985   const Register robj    = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
 986   const Register rmon    = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
 987                               // monitor pointers need different register
 988                               // because rdx may have the result in it
 989   NOT_LP64(get_thread(rcx);)
 990 
 991   // get the value of _do_not_unlock_if_synchronized into rdx
 992   const Address do_not_unlock_if_synchronized(rthread,
 993     in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
 994   movbool(rbx, do_not_unlock_if_synchronized);
 995   movbool(do_not_unlock_if_synchronized, false); // reset the flag
 996 
 997   // get method access flags
 998   movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
 999   movl(rcx, Address(rcx, Method::access_flags_offset()));
1000   testl(rcx, JVM_ACC_SYNCHRONIZED);
1001   jcc(Assembler::zero, unlocked);
1002 
1003   // Don't unlock anything if the _do_not_unlock_if_synchronized flag
1004   // is set.
1005   testbool(rbx);
1006   jcc(Assembler::notZero, no_unlock);
1007 
1008   // unlock monitor
1009   push(state); // save result
1010 
1011   // BasicObjectLock will be first in list, since this is a
1012   // synchronized method. However, need to check that the object has
1013   // not been unlocked by an explicit monitorexit bytecode.
1014   const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
1015                         wordSize - (int) sizeof(BasicObjectLock));
1016   // We use c_rarg1/rdx so that if we go slow path it will be the correct
1017   // register for unlock_object to pass to VM directly


1101     bind(loop);
1102     // check if current entry is used
1103     cmpptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL);
1104     jcc(Assembler::notEqual, exception);
1105 
1106     addptr(rmon, entry_size); // otherwise advance to next entry
1107     bind(entry);
1108     cmpptr(rmon, rbx); // check if bottom reached
1109     jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
1110   }
1111 
1112   bind(no_unlock);
1113 
1114   // jvmti support
1115   if (notify_jvmdi) {
1116     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
1117   } else {
1118     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
1119   }
1120 




1121   if (StackReservedPages > 0) {
1122     movptr(rbx,
1123                Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
1124     // testing if reserved zone needs to be re-enabled
1125     Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
1126     Label no_reserved_zone_enabling;
1127 
1128     NOT_LP64(get_thread(rthread);)
1129 
1130     cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_enabled);
1131     jcc(Assembler::equal, no_reserved_zone_enabling);
1132 
1133     cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
1134     jcc(Assembler::lessEqual, no_reserved_zone_enabling);
1135 
1136     call_VM_leaf(
1137       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
1138     call_VM(noreg, CAST_FROM_FN_PTR(address,
1139                    InterpreterRuntime::throw_delayed_StackOverflowError));
1140     should_not_reach_here();
1141 
1142     bind(no_reserved_zone_enabling);
1143   }
1144 
1145   // remove activation
1146   // get sender sp
1147   movptr(rbx,
1148          Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
1149 
1150   if (state == atos && ValueTypeReturnedAsFields) {
1151     Label skip;
1152     // Test if the return type is a value type
1153     movptr(rdi, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
1154     movptr(rdi, Address(rdi, Method::const_offset()));
1155     load_unsigned_byte(rdi, Address(rdi, ConstMethod::result_type_offset()));
1156     cmpl(rdi, T_VALUETYPE);
1157     jcc(Assembler::notEqual, skip);
1158 
1159     // We are returning a value type, load its fields into registers
1160 #ifndef _LP64
1161     super_call_VM_leaf(StubRoutines::load_value_type_fields_in_regs());
1162 #else
1163     // Load fields from a buffered value with a value class specific handler
1164     load_klass(rdi, rax);
1165     movptr(rdi, Address(rdi, InstanceKlass::adr_valueklass_fixed_block_offset()));
1166     movptr(rdi, Address(rdi, ValueKlass::unpack_handler_offset()));
1167 
1168     testptr(rdi, rdi);
1169     jcc(Assembler::equal, skip);
1170 
1171     call(rdi);
1172 #endif
1173     // call above kills the value in rbx. Reload it.
1174     movptr(rbx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
1175     bind(skip);
1176   }
1177   leave();                           // remove frame anchor
1178   pop(ret_addr);                     // get return address
1179   mov(rsp, rbx);                     // set sp to sender sp
1180 }
1181 
// Load the MethodCounters* of `method` into `mcs`, lazily allocating the
// counters via the runtime on first use. On allocation failure (OutOfMemory)
// control transfers to `skip` with mcs == NULL so the caller can bypass any
// counter updates. `method` is preserved; `mcs` is clobbered.
void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::notZero, has_counters);
  // Counters not yet allocated: call into the runtime to build them.
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  // Reload the pointer; the runtime call may have failed to allocate.
  movptr(mcs, Address(method,Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}
1195 
1196 


1219 
1220     const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
1221     const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
1222     const int mark_offset = lock_offset +
1223                             BasicLock::displaced_header_offset_in_bytes();
1224 
1225     Label slow_case;
1226 
1227     // Load object pointer into obj_reg
1228     movptr(obj_reg, Address(lock_reg, obj_offset));
1229 
1230     if (UseBiasedLocking) {
1231       biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, false, done, &slow_case);
1232     }
1233 
1234     // Load immediate 1 into swap_reg %rax
1235     movl(swap_reg, (int32_t)1);
1236 
1237     // Load (object->mark() | 1) into swap_reg %rax
1238     orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1239     if (EnableValhalla && !UseBiasedLocking) {
1240       // For slow path is_always_locked, using biased, which is never natural for !UseBiasLocking
1241       andptr(swap_reg, ~markOopDesc::biased_lock_bit_in_place);
1242     }
1243 
1244     // Save (object->mark() | 1) into BasicLock's displaced header
1245     movptr(Address(lock_reg, mark_offset), swap_reg);
1246 
1247     assert(lock_offset == 0,
1248            "displaced header must be first word in BasicObjectLock");
1249 
1250     lock();
1251     cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1252     if (PrintBiasedLockingStatistics) {
1253       cond_inc32(Assembler::zero,
1254                  ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
1255     }
1256     jcc(Assembler::zero, done);
1257 
1258     const int zero_bits = LP64_ONLY(7) NOT_LP64(3);
1259 
1260     // Test if the oopMark is an obvious stack pointer, i.e.,
1261     //  1) (mark & zero_bits) == 0, and
1262     //  2) rsp <= mark < mark + os::pagesize()


< prev index next >