< prev index next >

src/hotspot/cpu/x86/interp_masm_x86.cpp

Print this page

 804   prepare_to_jump_from_interpreted();
 805 
 806   if (JvmtiExport::can_post_interpreter_events()) {
 807     Label run_compiled_code;
 808     // JVMTI events, such as single-stepping, are implemented partly by avoiding running
 809     // compiled code in threads for which the event is enabled.  Check here for
 810     // interp_only_mode if these events CAN be enabled.
 811     // interp_only is an int, on little endian it is sufficient to test the byte only
 812     // Is a cmpl faster?
 813     LP64_ONLY(temp = r15_thread;)
 814     NOT_LP64(get_thread(temp);)
 815     cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
 816     jccb(Assembler::zero, run_compiled_code);
 817     jmp(Address(method, Method::interpreter_entry_offset()));
 818     bind(run_compiled_code);
 819   }
 820 
 821   jmp(Address(method, Method::from_interpreted_offset()));
 822 }
 823 


























































 824 // The following two routines provide a hook so that an implementation
 825 // can schedule the dispatch in two parts.  x86 does not do this.
 826 void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
 827   // Nothing x86 specific to be done here
 828 }
 829 
 830 void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
 831   dispatch_next(state, step);
 832 }
 833 
 834 void InterpreterMacroAssembler::dispatch_base(TosState state,
 835                                               address* table,
 836                                               bool verifyoop,
 837                                               bool generate_poll) {
 838   verify_FPU(1, state);
 839   if (VerifyActivationFrameSize) {
 840     Label L;
 841     mov(rcx, rbp);
 842     subptr(rcx, rsp);
 843     int32_t min_frame_size =

1047   if (throw_monitor_exception) {
1048     // Entry already unlocked, need to throw exception
1049     NOT_LP64(empty_FPU_stack();)  // remove possible return value from FPU-stack, otherwise stack could overflow
1050     call_VM(noreg, CAST_FROM_FN_PTR(address,
1051                    InterpreterRuntime::throw_illegal_monitor_state_exception));
1052     should_not_reach_here();
1053   } else {
1054     // Monitor already unlocked during a stack unroll. If requested,
1055     // install an illegal_monitor_state_exception.  Continue with
1056     // stack unrolling.
1057     if (install_monitor_exception) {
1058       NOT_LP64(empty_FPU_stack();)
1059       call_VM(noreg, CAST_FROM_FN_PTR(address,
1060                      InterpreterRuntime::new_illegal_monitor_state_exception));
1061     }
1062     jmp(unlocked);
1063   }
1064 
1065   bind(unlock);
1066   unlock_object(robj);



1067   pop(state);
1068 
1069   // Check that for block-structured locking (i.e., that all locked
1070   // objects has been unlocked)
1071   bind(unlocked);
1072 
1073   // rax, rdx: Might contain return value
1074 
1075   // Check that all monitors are unlocked
1076   {
1077     Label loop, exception, entry, restart;
1078     const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
1079     const Address monitor_block_top(
1080         rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
1081     const Address monitor_block_bot(
1082         rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
1083 
1084     bind(restart);
1085     // We use c_rarg1 so that if we go slow path it will be the correct
1086     // register for unlock_object to pass to VM directly

1091     jmp(entry);
1092 
1093     // Entry already locked, need to throw exception
1094     bind(exception);
1095 
1096     if (throw_monitor_exception) {
1097       // Throw exception
1098       NOT_LP64(empty_FPU_stack();)
1099       MacroAssembler::call_VM(noreg,
1100                               CAST_FROM_FN_PTR(address, InterpreterRuntime::
1101                                    throw_illegal_monitor_state_exception));
1102       should_not_reach_here();
1103     } else {
1104       // Stack unrolling. Unlock object and install illegal_monitor_exception.
1105       // Unlock does not block, so don't have to worry about the frame.
1106       // We don't have to preserve c_rarg1 since we are going to throw an exception.
1107 
1108       push(state);
1109       mov(robj, rmon);   // nop if robj and rmon are the same
1110       unlock_object(robj);


1111       pop(state);
1112 
1113       if (install_monitor_exception) {
1114         NOT_LP64(empty_FPU_stack();)
1115         call_VM(noreg, CAST_FROM_FN_PTR(address,
1116                                         InterpreterRuntime::
1117                                         new_illegal_monitor_state_exception));
1118       }
1119 
1120       jmp(restart);
1121     }
1122 
1123     bind(loop);
1124     // check if current entry is used
1125     cmpptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL);
1126     jcc(Assembler::notEqual, exception);
1127 
1128     addptr(rmon, entry_size); // otherwise advance to next entry
1129     bind(entry);
1130     cmpptr(rmon, rbx); // check if bottom reached

1151 
1152     NOT_LP64(get_thread(rthread);)
1153 
1154     cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_enabled);
1155     jcc(Assembler::equal, no_reserved_zone_enabling);
1156 
1157     cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
1158     jcc(Assembler::lessEqual, no_reserved_zone_enabling);
1159 
1160     call_VM_leaf(
1161       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
1162     call_VM(noreg, CAST_FROM_FN_PTR(address,
1163                    InterpreterRuntime::throw_delayed_StackOverflowError));
1164     should_not_reach_here();
1165 
1166     bind(no_reserved_zone_enabling);
1167   }
1168   leave();                           // remove frame anchor
1169   pop(ret_addr);                     // get return address
1170   mov(rsp, rbx);                     // set sp to sender sp

1171 }
1172 
1173 void InterpreterMacroAssembler::get_method_counters(Register method,
1174                                                     Register mcs, Label& skip) {
1175   Label has_counters;
1176   movptr(mcs, Address(method, Method::method_counters_offset()));
1177   testptr(mcs, mcs);
1178   jcc(Assembler::notZero, has_counters);
1179   call_VM(noreg, CAST_FROM_FN_PTR(address,
1180           InterpreterRuntime::build_method_counters), method);
1181   movptr(mcs, Address(method,Method::method_counters_offset()));
1182   testptr(mcs, mcs);
1183   jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
1184   bind(has_counters);
1185 }
1186 
1187 
1188 // Lock object
1189 //
1190 // Args:

1261     // because we have guard pages at the end of all stacks. Hence, if
1262     // we go over the stack base and hit the stack of another thread,
1263     // this should not be in a writeable area that could contain a
1264     // stack lock allocated by that thread. As a consequence, a stack
1265     // lock less than page size away from rsp is guaranteed to be
1266     // owned by the current thread.
1267     //
1268     // These 3 tests can be done by evaluating the following
1269     // expression: ((mark - rsp) & (zero_bits - os::vm_page_size())),
1270     // assuming both stack pointer and pagesize have their
1271     // least significant bits clear.
1272     // NOTE: the mark is in swap_reg %rax as the result of cmpxchg
1273     subptr(swap_reg, rsp);
1274     andptr(swap_reg, zero_bits - os::vm_page_size());
1275 
1276     // Save the test result, for recursive case, the result is zero
1277     movptr(Address(lock_reg, mark_offset), swap_reg);
1278     jcc(Assembler::zero, done);
1279 
1280     bind(slow_case);
1281 
1282     // Call the runtime routine for slow case
1283     call_VM(noreg,
1284             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1285             lock_reg);
1286 
1287     bind(done);
1288   }
1289 }
1290 
1291 
1292 // Unlocks an object. Used in monitorexit bytecode and
1293 // remove_activation.  Throws an IllegalMonitorException if object is
1294 // not locked by current thread.
1295 //
1296 // Args:
1297 //      rdx, c_rarg1: BasicObjectLock for lock
1298 //
1299 // Kills:
1300 //      rax
1301 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)

1332 
1333     // Test for recursion
1334     testptr(header_reg, header_reg);
1335 
1336     // zero for recursive case
1337     jcc(Assembler::zero, done);
1338 
1339     // Atomic swap back the old header
1340     lock();
1341     cmpxchgptr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1342 
1343     // zero for simple unlock of a stack-lock case
1344     jcc(Assembler::zero, done);
1345 
1346 
1347     // Call the runtime routine for slow case.
1348     movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), obj_reg); // restore obj
1349     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1350 
1351     bind(done);
1352 
1353     restore_bcp();
1354   }
1355 }
1356 
// Loads the current frame's saved method-data pointer (mdp) into 'mdp' and
// branches to 'zero_continue' if it is null (i.e. no MDO is set up for this
// frame). Only meaningful when the interpreter is profiling.
void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
                                                         Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Load the mdp slot from the interpreter frame.
  movptr(mdp, Address(rbp, frame::interpreter_frame_mdp_offset * wordSize));
  testptr(mdp, mdp);
  jcc(Assembler::zero, zero_continue);
}
1364 
1365 
1366 // Set the method data pointer for the current bcp.
1367 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
1368   assert(ProfileInterpreter, "must be profiling interpreter");
1369   Label set_mdp;
1370   push(rax);
1371   push(rbx);
1372 

 804   prepare_to_jump_from_interpreted();
 805 
 806   if (JvmtiExport::can_post_interpreter_events()) {
 807     Label run_compiled_code;
 808     // JVMTI events, such as single-stepping, are implemented partly by avoiding running
 809     // compiled code in threads for which the event is enabled.  Check here for
 810     // interp_only_mode if these events CAN be enabled.
 811     // interp_only is an int, on little endian it is sufficient to test the byte only
 812     // Is a cmpl faster?
 813     LP64_ONLY(temp = r15_thread;)
 814     NOT_LP64(get_thread(temp);)
 815     cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
 816     jccb(Assembler::zero, run_compiled_code);
 817     jmp(Address(method, Method::interpreter_entry_offset()));
 818     bind(run_compiled_code);
 819   }
 820 
 821   jmp(Address(method, Method::from_interpreted_offset()));
 822 }
 823 
 824 // void InterpreterMacroAssembler::resolve_special(Register rmethod, LinkInfo link_info) {
 825 //   CallInfo callinfo;
 826 //   LinkResolver::resolve_special_call(callinfo, Handle(), link_info, Thread::current());
 827 //   methodHandle methodh = callinfo.selected_method();
 828 //   assert(methodh.not_null(), "should have thrown exception");
 829 //   Method* method = methodh();
 830 //   tty->print_cr("call_Java_final method: " INTPTR_FORMAT " name: %s", p2i(method), method->name()->as_C_string());
 831 //   // tty->print_cr("call_Java_final const: " INTPTR_FORMAT ", params: %d locals %d", p2i(method->constMethod()), method->constMethod()->_size_of_parameters, method->constMethod()->_max_locals);
 832 
 833 //   movptr(rmethod, AddressLiteral((address)method, RelocationHolder::none).addr());
 834 // }
 835 
 836 // void InterpreterMacroAssembler::get_entry(Register entry, Register method) {
 837 //   // TODO: see InterpreterMacroAssembler::jump_from_interpreted for special cases
 838 //   Label done;
 839 //   // if (JvmtiExport::can_post_interpreter_events()) {
 840 //   //   Register temp;
 841 //   //   Label run_compiled_code;
 842 //   //   // JVMTI events, such as single-stepping, are implemented partly by avoiding running
 843 //   //   // compiled code in threads for which the event is enabled.  Check here for
 844 //   //   // interp_only_mode if these events CAN be enabled.
 845 //   //   // interp_only is an int, on little endian it is sufficient to test the byte only
 846 //   //   // Is a cmpl faster?
 847 //   //   LP64_ONLY(temp = r15_thread;)
 848 //   //   NOT_LP64(get_thread(temp);)
 849 //   //   cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
 850 //   //   jccb(Assembler::zero, run_compiled_code);
 851 //   //   movptr(entry, Address(method, Method::interpreter_entry_offset()));
 852 //   //   bind(run_compiled_code);
 853 //   // }
 854 //   movptr(entry, Address(method, Method::from_interpreted_offset()));
 855 //   bind(done);
 856 // }
 857 
 858 // // loads method into rbx
 859 // void InterpreterMacroAssembler::get_entry(Register entry, LinkInfo link_info) {
 860 //   resolve_special(rbx, link_info);
 861 //   get_entry(entry, rbx);
 862 // }
 863 
 864 // void InterpreterMacroAssembler::call_Java_final(LinkInfo link_info) {
 865 //   Register rentry = rax;
 866 //   get_entry(rentry, link_info);
 867 
 868 //   // profile_call(rax); // ?? rax
 869 //   // profile_arguments_type(rax, rbx, rbcp, false);
 870 //   call(rentry);
 871 // }
 872 
 873 // void InterpreterMacroAssembler::jump_Java_final(LinkInfo link_info) {
 874 //   Register rentry = rax;
 875 //   get_entry(rentry, link_info);
 876 
 877 //   // profile_call(rax); // ?? rax
 878 //   // profile_arguments_type(rax, rbx, rbcp, false);
 879 //   jmp(rentry);
 880 // }
 881 
 882 // The following two routines provide a hook so that an implementation
 883 // can schedule the dispatch in two parts.  x86 does not do this.
 884 void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
 885   // Nothing x86 specific to be done here
 886 }
 887 
 888 void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
 889   dispatch_next(state, step);
 890 }
 891 
 892 void InterpreterMacroAssembler::dispatch_base(TosState state,
 893                                               address* table,
 894                                               bool verifyoop,
 895                                               bool generate_poll) {
 896   verify_FPU(1, state);
 897   if (VerifyActivationFrameSize) {
 898     Label L;
 899     mov(rcx, rbp);
 900     subptr(rcx, rsp);
 901     int32_t min_frame_size =

1105   if (throw_monitor_exception) {
1106     // Entry already unlocked, need to throw exception
1107     NOT_LP64(empty_FPU_stack();)  // remove possible return value from FPU-stack, otherwise stack could overflow
1108     call_VM(noreg, CAST_FROM_FN_PTR(address,
1109                    InterpreterRuntime::throw_illegal_monitor_state_exception));
1110     should_not_reach_here();
1111   } else {
1112     // Monitor already unlocked during a stack unroll. If requested,
1113     // install an illegal_monitor_state_exception.  Continue with
1114     // stack unrolling.
1115     if (install_monitor_exception) {
1116       NOT_LP64(empty_FPU_stack();)
1117       call_VM(noreg, CAST_FROM_FN_PTR(address,
1118                      InterpreterRuntime::new_illegal_monitor_state_exception));
1119     }
1120     jmp(unlocked);
1121   }
1122 
1123   bind(unlock);
1124   unlock_object(robj);
1125   NOT_LP64(get_thread(rthread);)
1126   dec_held_monitor_count(rthread);
1127 
1128   pop(state);
1129 
1130   // Check that for block-structured locking (i.e., that all locked
1131   // objects has been unlocked)
1132   bind(unlocked);
1133 
1134   // rax, rdx: Might contain return value
1135 
1136   // Check that all monitors are unlocked
1137   {
1138     Label loop, exception, entry, restart;
1139     const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
1140     const Address monitor_block_top(
1141         rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
1142     const Address monitor_block_bot(
1143         rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
1144 
1145     bind(restart);
1146     // We use c_rarg1 so that if we go slow path it will be the correct
1147     // register for unlock_object to pass to VM directly

1152     jmp(entry);
1153 
1154     // Entry already locked, need to throw exception
1155     bind(exception);
1156 
1157     if (throw_monitor_exception) {
1158       // Throw exception
1159       NOT_LP64(empty_FPU_stack();)
1160       MacroAssembler::call_VM(noreg,
1161                               CAST_FROM_FN_PTR(address, InterpreterRuntime::
1162                                    throw_illegal_monitor_state_exception));
1163       should_not_reach_here();
1164     } else {
1165       // Stack unrolling. Unlock object and install illegal_monitor_exception.
1166       // Unlock does not block, so don't have to worry about the frame.
1167       // We don't have to preserve c_rarg1 since we are going to throw an exception.
1168 
1169       push(state);
1170       mov(robj, rmon);   // nop if robj and rmon are the same
1171       unlock_object(robj);
1172       NOT_LP64(get_thread(rthread);)
1173       dec_held_monitor_count(rthread);
1174       pop(state);
1175 
1176       if (install_monitor_exception) {
1177         NOT_LP64(empty_FPU_stack();)
1178         call_VM(noreg, CAST_FROM_FN_PTR(address,
1179                                         InterpreterRuntime::
1180                                         new_illegal_monitor_state_exception));
1181       }
1182 
1183       jmp(restart);
1184     }
1185 
1186     bind(loop);
1187     // check if current entry is used
1188     cmpptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL);
1189     jcc(Assembler::notEqual, exception);
1190 
1191     addptr(rmon, entry_size); // otherwise advance to next entry
1192     bind(entry);
1193     cmpptr(rmon, rbx); // check if bottom reached

1214 
1215     NOT_LP64(get_thread(rthread);)
1216 
1217     cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_enabled);
1218     jcc(Assembler::equal, no_reserved_zone_enabling);
1219 
1220     cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
1221     jcc(Assembler::lessEqual, no_reserved_zone_enabling);
1222 
1223     call_VM_leaf(
1224       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
1225     call_VM(noreg, CAST_FROM_FN_PTR(address,
1226                    InterpreterRuntime::throw_delayed_StackOverflowError));
1227     should_not_reach_here();
1228 
1229     bind(no_reserved_zone_enabling);
1230   }
1231   leave();                           // remove frame anchor
1232   pop(ret_addr);                     // get return address
1233   mov(rsp, rbx);                     // set sp to sender sp
1234   pop_cont_fastpath(rthread);
1235 }
1236 
1237 void InterpreterMacroAssembler::get_method_counters(Register method,
1238                                                     Register mcs, Label& skip) {
1239   Label has_counters;
1240   movptr(mcs, Address(method, Method::method_counters_offset()));
1241   testptr(mcs, mcs);
1242   jcc(Assembler::notZero, has_counters);
1243   call_VM(noreg, CAST_FROM_FN_PTR(address,
1244           InterpreterRuntime::build_method_counters), method);
1245   movptr(mcs, Address(method,Method::method_counters_offset()));
1246   testptr(mcs, mcs);
1247   jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
1248   bind(has_counters);
1249 }
1250 
1251 
1252 // Lock object
1253 //
1254 // Args:

1325     // because we have guard pages at the end of all stacks. Hence, if
1326     // we go over the stack base and hit the stack of another thread,
1327     // this should not be in a writeable area that could contain a
1328     // stack lock allocated by that thread. As a consequence, a stack
1329     // lock less than page size away from rsp is guaranteed to be
1330     // owned by the current thread.
1331     //
1332     // These 3 tests can be done by evaluating the following
1333     // expression: ((mark - rsp) & (zero_bits - os::vm_page_size())),
1334     // assuming both stack pointer and pagesize have their
1335     // least significant bits clear.
1336     // NOTE: the mark is in swap_reg %rax as the result of cmpxchg
1337     subptr(swap_reg, rsp);
1338     andptr(swap_reg, zero_bits - os::vm_page_size());
1339 
1340     // Save the test result, for recursive case, the result is zero
1341     movptr(Address(lock_reg, mark_offset), swap_reg);
1342     jcc(Assembler::zero, done);
1343 
1344     bind(slow_case);

1345     // Call the runtime routine for slow case
1346     call_VM(noreg,
1347             CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1348             lock_reg);
1349 
1350     bind(done);
1351   }
1352 }
1353 
1354 
1355 // Unlocks an object. Used in monitorexit bytecode and
1356 // remove_activation.  Throws an IllegalMonitorException if object is
1357 // not locked by current thread.
1358 //
1359 // Args:
1360 //      rdx, c_rarg1: BasicObjectLock for lock
1361 //
1362 // Kills:
1363 //      rax
1364 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)

1395 
1396     // Test for recursion
1397     testptr(header_reg, header_reg);
1398 
1399     // zero for recursive case
1400     jcc(Assembler::zero, done);
1401 
1402     // Atomic swap back the old header
1403     lock();
1404     cmpxchgptr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1405 
1406     // zero for simple unlock of a stack-lock case
1407     jcc(Assembler::zero, done);
1408 
1409 
1410     // Call the runtime routine for slow case.
1411     movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), obj_reg); // restore obj
1412     call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1413 
1414     bind(done);

1415     restore_bcp();
1416   }
1417 }
1418 
// Loads the current frame's saved method-data pointer (mdp) into 'mdp' and
// branches to 'zero_continue' if it is null (i.e. no MDO is set up for this
// frame). Only meaningful when the interpreter is profiling.
void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
                                                         Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Load the mdp slot from the interpreter frame.
  movptr(mdp, Address(rbp, frame::interpreter_frame_mdp_offset * wordSize));
  testptr(mdp, mdp);
  jcc(Assembler::zero, zero_continue);
}
1426 
1427 
1428 // Set the method data pointer for the current bcp.
1429 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
1430   assert(ProfileInterpreter, "must be profiling interpreter");
1431   Label set_mdp;
1432   push(rax);
1433   push(rbx);
1434 
< prev index next >