src/hotspot/cpu/x86/vtableStubs_x86_64.cpp

 28 #include "interp_masm_x86.hpp"
 29 #include "memory/resourceArea.hpp"
 30 #include "oops/compiledICHolder.hpp"
 31 #include "oops/instanceKlass.hpp"
 32 #include "oops/klassVtable.hpp"
 33 #include "runtime/sharedRuntime.hpp"
 34 #include "vmreg_x86.inline.hpp"
 35 #ifdef COMPILER2
 36 #include "opto/runtime.hpp"
 37 #endif
 38 
 39 // machine-dependent part of VtableStubs: create VtableStub of correct size and
 40 // initialize its code
 41 
 42 #define __ masm->
 43 
 44 #ifndef PRODUCT
 45 extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
 46 #endif
 47 
 48 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
 49   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
 50   const int stub_code_length = code_size_limit(true);
 51   Register tmp_load_klass = rscratch1;
 52   VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
 53   // Can be NULL if there is no free space in the code cache.
 54   if (s == NULL) {
 55     return NULL;
 56   }
 57 
 58   // Count unused bytes in instruction sequences of variable size.
 59   // We add them to the computed buffer size in order to avoid
 60   // overflow in subsequently generated stubs.
 61   address   start_pc;
 62   int       slop_bytes = 0;
 63   int       slop_delta = 0;
 64   // No variance was detected in vtable stub sizes. Setting index_dependent_slop == 0 will reveal any deviation from this observation.
 65   const int index_dependent_slop     = 0;
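      // The slop accounting below follows one pattern (a sketch, not emitted code):
      //   start_pc    = __ pc();                        // start of a variable-size instruction sequence
      //   ... emit the sequence ...
      //   slop_delta  = budget - (__ pc() - start_pc);  // unused bytes out of the budgeted worst case
      //   slop_bytes += slop_delta;                     // accumulated and reported via bookkeeping()
      // A negative slop_delta means the budget (and hence code_size_limit()) is too small.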
 66 
 67   ResourceMark    rm;
 68   CodeBuffer      cb(s->entry_point(), stub_code_length);
 69   MacroAssembler* masm = new MacroAssembler(&cb);
 70 
 71 #if (!defined(PRODUCT) && defined(COMPILER2))
 72   if (CountCompiledCalls) {
 73     __ incrementq(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
 74   }
 75 #endif
 76 
 77   // get receiver (need to skip return address on top of stack)
 78   assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
 79 
 80   // Free registers (non-args) are rax, rbx
 81 
 82   // get receiver klass
 83   address npe_addr = __ pc();
 84   __ load_klass(rax, j_rarg0, tmp_load_klass);
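      // Note: this klass load is the first instruction that dereferences the receiver, so a
      // null receiver faults exactly at npe_addr; the implicit-exception machinery maps a
      // fault at this PC to a NullPointerException (npe_addr is handed to bookkeeping() below).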
 85 

102     slop_bytes += slop_delta;
103     assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
104     __ bind(L);
105   }
106 #endif // PRODUCT
107 
108   const Register method = rbx;
109 
110   // load Method* and target address
111   start_pc = __ pc();
112   __ lookup_virtual_method(rax, vtable_index, method);
113   slop_delta  = 8 - (int)(__ pc() - start_pc);
114   slop_bytes += slop_delta;
115   assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
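      // The budget of 8 bytes above covers the single vtable-entry load emitted by
      // lookup_virtual_method (a movptr with, in the worst case, a 32-bit displacement);
      // whatever the load does not use flows back into slop_bytes.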
116 
117 #ifndef PRODUCT
118   if (DebugVtables) {
119     Label L;
120     __ cmpptr(method, (int32_t)NULL_WORD);
121     __ jcc(Assembler::equal, L);
122     __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
123     __ jcc(Assembler::notZero, L);
124     __ stop("Vtable entry is NULL");
125     __ bind(L);
126   }
127 #endif // PRODUCT
128 
129   // rax: receiver klass
130   // method (rbx): Method*
131   // rcx: receiver
132   address ame_addr = __ pc();
133   __ jmp( Address(rbx, Method::from_compiled_offset()));
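      // ame_addr marks the jump through the method's from_compiled entry. If the Method*
      // loaded from the vtable is null (the DebugVtables check above deliberately tolerates
      // this), the memory access here faults, and a fault at ame_addr is reported as an
      // AbstractMethodError rather than a plain crash.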
134 
135   masm->flush();
136   slop_bytes += index_dependent_slop; // add'l slop for size variance due to large vtable indices (index_dependent_slop is 0 here)
137   bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, index_dependent_slop);
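      // bookkeeping() is shared VtableStubs code: it records npe_addr/ame_addr as the stub's
      // exception points, checks that the generated code fits the size estimate, and feeds
      // slop_bytes into the size statistics used for subsequently generated stubs.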
138 
139   return s;
140 }
141 
142 
143 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
144   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
145   const int stub_code_length = code_size_limit(false);
146   VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
147   // Can be NULL if there is no free space in the code cache.
148   if (s == NULL) {
149     return NULL;
150   }
151 
152   // Count unused bytes in instruction sequences of variable size.
153   // We add them to the computed buffer size in order to avoid
154   // overflow in subsequently generated stubs.
155   address   start_pc;
156   int       slop_bytes = 0;
157   int       slop_delta = 0;
158   const int index_dependent_slop = (itable_index == 0) ? 4 :    // index == 0 generates even shorter code (no displacement byte at all).
159                                    (itable_index < 16) ? 3 : 0;  // code size changes when the itable offset constant grows from 8-bit to 32-bit (@index == 16).
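      // Rationale (rough sketch): lookup_interface_method addresses the itable entry with a
      // constant byte offset derived from itable_index. Up to index 15 that offset fits a
      // signed 8-bit displacement; from index 16 on it needs a 32-bit displacement (about
      // 3 extra bytes), and index 0 needs no displacement byte at all. The slop keeps the
      // size bookkeeping independent of the index this particular stub was generated for.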
160 
161   ResourceMark    rm;
162   CodeBuffer      cb(s->entry_point(), stub_code_length);
163   MacroAssembler *masm = new MacroAssembler(&cb);
164 
165 #if (!defined(PRODUCT) && defined(COMPILER2))
166   if (CountCompiledCalls) {

201                              L_no_such_interface,
202                              /*return_method=*/false);
203 
204   const ptrdiff_t  typecheckSize = __ pc() - start_pc;
205   start_pc = __ pc();
206 
207   // Get selected method from declaring class and itable index
208   const Register method = rbx;
209   __ load_klass(recv_klass_reg, j_rarg0, temp_reg);   // restore recv_klass_reg
210   __ lookup_interface_method(// inputs: rec. class, interface, itable index
211                              recv_klass_reg, holder_klass_reg, itable_index,
212                              // outputs: method, scan temp. reg
213                              method, temp_reg,
214                              L_no_such_interface);
215 
216   const ptrdiff_t  lookupSize = __ pc() - start_pc;
217 
218   // We expect to need index_dependent_slop extra bytes. Reason:
219   // the code emitted by lookup_interface_method changes when itable_index exceeds 15.
220   // On Linux, a very narrow estimate would be 112 bytes, but Solaris requires somewhat more space (130 bytes).
221   const ptrdiff_t estimate = 136;
222   const ptrdiff_t codesize = typecheckSize + lookupSize + index_dependent_slop;
223   slop_delta  = (int)(estimate - codesize);
224   slop_bytes += slop_delta;
225   assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);
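      // In other words: the measured type check plus method lookup, padded by the worst-case
      // index-dependent growth, must stay within the 136-byte estimate. If a code change
      // pushes codesize past the estimate, the assert fires and the estimate (and with it
      // code_size_limit()) has to be raised.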
226 
227   // If we take a trap while this arg is on the stack we will not
228   // be able to walk the stack properly. This is not an issue except
229   // when there are mistakes in this assembly code that could generate
230   // a spurious fault. Ask me how I know...
231 
232   // method (rbx): Method*
233   // j_rarg0: receiver
234 
235 #ifdef ASSERT
236   if (DebugVtables) {
237     Label L2;
238     __ cmpptr(method, (int32_t)NULL_WORD);
239     __ jcc(Assembler::equal, L2);
240     __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
241     __ jcc(Assembler::notZero, L2);
242     __ stop("compiler entrypoint is null");
243     __ bind(L2);
244   }
245 #endif // ASSERT
246 
247   address ame_addr = __ pc();
248   __ jmp(Address(method, Method::from_compiled_offset()));
249 
250   __ bind(L_no_such_interface);
251   // Handle IncompatibleClassChangeError in itable stubs.
252   // More detailed error message.
253   // We force resolving of the call site by jumping to the "handle
254   // wrong method" stub, and so let the interpreter runtime do all the
255   // dirty work.
256   __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
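      // The "handle wrong method" stub re-enters the runtime, which re-resolves the call,
      // discovers that the receiver class does not implement the interface, and throws the
      // IncompatibleClassChangeError with a detailed message; nothing is thrown directly here.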
257 
258   masm->flush();
259   slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets
260   bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, index_dependent_slop);
261 
262   return s;
263 }
264 
265 int VtableStub::pd_code_alignment() {
266   // x86 cache line size is 64 bytes, but we want to limit alignment loss.
267   const unsigned int icache_line_size = wordSize;
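      // Trade-off: full 64-byte alignment would keep a stub from straddling a cache line, but
      // it wastes code cache, since stubs are only a few dozen bytes each and there are many
      // of them; wordSize (8-byte) alignment keeps that loss small at the cost of occasional
      // straddling.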
268   return icache_line_size;

 28 #include "interp_masm_x86.hpp"
 29 #include "memory/resourceArea.hpp"
 30 #include "oops/compiledICHolder.hpp"
 31 #include "oops/instanceKlass.hpp"
 32 #include "oops/klassVtable.hpp"
 33 #include "runtime/sharedRuntime.hpp"
 34 #include "vmreg_x86.inline.hpp"
 35 #ifdef COMPILER2
 36 #include "opto/runtime.hpp"
 37 #endif
 38 
 39 // machine-dependent part of VtableStubs: create VtableStub of correct size and
 40 // initialize its code
 41 
 42 #define __ masm->
 43 
 44 #ifndef PRODUCT
 45 extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
 46 #endif
 47 
 48 VtableStub* VtableStubs::create_vtable_stub(int vtable_index, bool caller_is_c1) {
 49   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
 50   const int stub_code_length = code_size_limit(true);
 51   Register tmp_load_klass = rscratch1;
 52   VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index, caller_is_c1);
 53   // Can be NULL if there is no free space in the code cache.
 54   if (s == NULL) {
 55     return NULL;
 56   }
 57 
 58   // Count unused bytes in instruction sequences of variable size.
 59   // We add them to the computed buffer size in order to avoid
 60   // overflow in subsequently generated stubs.
 61   address   start_pc;
 62   int       slop_bytes = 0;
 63   int       slop_delta = 0;
 64   // No variance was detected in vtable stub sizes. Setting index_dependent_slop == 0 will reveal any deviation from this observation.
 65   const int index_dependent_slop     = 0;
 66   ByteSize  entry_offset = caller_is_c1 ? Method::from_compiled_inline_offset() :  Method::from_compiled_inline_ro_offset();
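      // New in this change: Method now exposes more than one compiled entry point, and the
      // stub must jump through the one whose calling convention matches the caller. This is
      // presumably related to scalarized vs. non-scalarized passing of inline-type arguments
      // (caller_is_c1 distinguishes C1-compiled callers); the precise meaning of the two
      // offsets is defined in Method and is not visible in this diff. The itable stub below
      // makes the same selection.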
 67 
 68   ResourceMark    rm;
 69   CodeBuffer      cb(s->entry_point(), stub_code_length);
 70   MacroAssembler* masm = new MacroAssembler(&cb);
 71 
 72 #if (!defined(PRODUCT) && defined(COMPILER2))
 73   if (CountCompiledCalls) {
 74     __ incrementq(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
 75   }
 76 #endif
 77 
 78   // get receiver (need to skip return address on top of stack)
 79   assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
 80 
 81   // Free registers (non-args) are rax, rbx
 82 
 83   // get receiver klass
 84   address npe_addr = __ pc();
 85   __ load_klass(rax, j_rarg0, tmp_load_klass);
 86 

103     slop_bytes += slop_delta;
104     assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
105     __ bind(L);
106   }
107 #endif // PRODUCT
108 
109   const Register method = rbx;
110 
111   // load Method* and target address
112   start_pc = __ pc();
113   __ lookup_virtual_method(rax, vtable_index, method);
114   slop_delta  = 8 - (int)(__ pc() - start_pc);
115   slop_bytes += slop_delta;
116   assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
117 
118 #ifndef PRODUCT
119   if (DebugVtables) {
120     Label L;
121     __ cmpptr(method, (int32_t)NULL_WORD);
122     __ jcc(Assembler::equal, L);
123     __ cmpptr(Address(method, entry_offset), (int32_t)NULL_WORD);
124     __ jcc(Assembler::notZero, L);
125     __ stop("Vtable entry is NULL");
126     __ bind(L);
127   }
128 #endif // PRODUCT
129 
130   // rax: receiver klass
131   // method (rbx): Method*
132   // rcx: receiver
133   address ame_addr = __ pc();
134   __ jmp( Address(rbx, entry_offset));
135 
136   masm->flush();
137   slop_bytes += index_dependent_slop; // add'l slop for size variance due to large vtable indices (index_dependent_slop is 0 here)
138   bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, index_dependent_slop);
139 
140   return s;
141 }
142 
143 
144 VtableStub* VtableStubs::create_itable_stub(int itable_index, bool caller_is_c1) {
145   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
146   const int stub_code_length = code_size_limit(false);
147   ByteSize  entry_offset = caller_is_c1 ? Method::from_compiled_inline_offset() :  Method::from_compiled_inline_ro_offset();
148   VtableStub* s = new(stub_code_length) VtableStub(false, itable_index, caller_is_c1);
149   // Can be NULL if there is no free space in the code cache.
150   if (s == NULL) {
151     return NULL;
152   }
153 
154   // Count unused bytes in instruction sequences of variable size.
155   // We add them to the computed buffer size in order to avoid
156   // overflow in subsequently generated stubs.
157   address   start_pc;
158   int       slop_bytes = 0;
159   int       slop_delta = 0;
160   const int index_dependent_slop = (itable_index == 0) ? 4 :    // index == 0 generates even shorter code (no displacement byte at all).
161                                    (itable_index < 16) ? 3 : 0;  // code size changes when the itable offset constant grows from 8-bit to 32-bit (@index == 16).
162 
163   ResourceMark    rm;
164   CodeBuffer      cb(s->entry_point(), stub_code_length);
165   MacroAssembler *masm = new MacroAssembler(&cb);
166 
167 #if (!defined(PRODUCT) && defined(COMPILER2))
168   if (CountCompiledCalls) {

203                              L_no_such_interface,
204                              /*return_method=*/false);
205 
206   const ptrdiff_t  typecheckSize = __ pc() - start_pc;
207   start_pc = __ pc();
208 
209   // Get selected method from declaring class and itable index
210   const Register method = rbx;
211   __ load_klass(recv_klass_reg, j_rarg0, temp_reg);   // restore recv_klass_reg
212   __ lookup_interface_method(// inputs: rec. class, interface, itable index
213                              recv_klass_reg, holder_klass_reg, itable_index,
214                              // outputs: method, scan temp. reg
215                              method, temp_reg,
216                              L_no_such_interface);
217 
218   const ptrdiff_t  lookupSize = __ pc() - start_pc;
219 
220   // We expect to need index_dependent_slop extra bytes. Reason:
221   // the code emitted by lookup_interface_method changes when itable_index exceeds 15.
222   // On Linux, a very narrow estimate would be 112 bytes, but Solaris requires somewhat more space (130 bytes).
223   const ptrdiff_t estimate = 144;
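      // Note: the estimate is 8 bytes larger than in the old version (136 -> 144), presumably
      // to cover additional code emitted on this path by the changes in this patch; the assert
      // below still flags the estimate if it ever becomes too small.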
224   const ptrdiff_t codesize = typecheckSize + lookupSize + index_dependent_slop;
225   slop_delta  = (int)(estimate - codesize);
226   slop_bytes += slop_delta;
227   assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);
228 
229   // If we take a trap while this arg is on the stack we will not
230   // be able to walk the stack properly. This is not an issue except
231   // when there are mistakes in this assembly code that could generate
232   // a spurious fault. Ask me how I know...
233 
234   // method (rbx): Method*
235   // j_rarg0: receiver
236 
237 #ifdef ASSERT
238   if (DebugVtables) {
239     Label L2;
240     __ cmpptr(method, (int32_t)NULL_WORD);
241     __ jcc(Assembler::equal, L2);
242     __ cmpptr(Address(method, entry_offset), (int32_t)NULL_WORD);
243     __ jcc(Assembler::notZero, L2);
244     __ stop("compiler entrypoint is null");
245     __ bind(L2);
246   }
247 #endif // ASSERT
248 
249   address ame_addr = __ pc();
250   __ jmp(Address(method, entry_offset));
251 
252   __ bind(L_no_such_interface);
253   // Handle IncompatibleClassChangeError in itable stubs.
254   // More detailed error message.
255   // We force resolving of the call site by jumping to the "handle
256   // wrong method" stub, and so let the interpreter runtime do all the
257   // dirty work.
258   __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
259 
260   masm->flush();
261   slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets
262   bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, index_dependent_slop);
263 
264   return s;
265 }
266 
267 int VtableStub::pd_code_alignment() {
268   // x86 cache line size is 64 bytes, but we want to limit alignment loss.
269   const unsigned int icache_line_size = wordSize;
270   return icache_line_size;