 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodCounters.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/inlineKlass.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/resolvedMethodEntry.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/continuation.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

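// Every '__' assembler call is routed through Disassembler::hook, which
// records the C++ source location so that disassembly output (e.g. with
// -XX:+PrintInterpreter) can be annotated with the generating line.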
#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->

// Size of interpreter code.  Increase if too small.  The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if it is too small.
// Run with -XX:+PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI support enabled.
#ifdef AMD64
int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(280) NOT_JVMCI(268) * 1024;
#else
int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
#endif // AMD64

// Global Register Names
static const Register rbcp    = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);
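// rbcp caches the current bytecode pointer and rlocals the base of the
// current method's locals area; both stay live across all of the
// interpreter's bytecode templates.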

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;


//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    // ...

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

#ifndef _LP64
#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // COMPILER2
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
  }
#endif // _LP64

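  // The i2c adapter may have extended the caller's expression stack when
  // packing outgoing arguments, so rsp cannot be trusted here;
  // interpreter_frame_last_sp (saved relative to rbp) holds the real
  // stack bottom.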
  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rscratch1, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ lea(rsp, Address(rbp, rscratch1, Address::times_ptr));
  // and null it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);

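  // With InlineTypeReturnedAsFields the callee may return an inline type's
  // fields scattered across registers; buffer them back into a heap object
  // so the interpreter sees a single oop in rax.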
  if (state == atos && InlineTypeReturnedAsFields) {
    __ store_inline_type_fields_to_buf(nullptr);
  }

  __ restore_bcp();
  __ restore_locals();

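  // For a reference return, record the concrete type of the returned oop
  // in the method data (MDO) so the JIT can speculate on it later.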
  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

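  // Pop the callee's parameters off the caller's expression stack. The
  // parameter count comes from the resolved invokedynamic entry (u4 index)
  // or the resolved method entry (u2 index) in the constant pool cache.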
  const Register cache = rbx;
  const Register index = rcx;
  if (index_size == sizeof(u4)) {
    __ load_resolved_indy_entry(cache, index);
    __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedIndyEntry::num_parameters_offset())));
    __ lea(rsp, Address(rsp, cache, Interpreter::stackElementScale()));
  } else {
    assert(index_size == sizeof(u2), "Can only be u2");
    __ load_method_entry(cache, index);
    __ load_unsigned_short(cache, Address(cache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
    __ lea(rsp, Address(rsp, cache, Interpreter::stackElementScale()));
  }