src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp

 30 #include "interp_masm_aarch64.hpp"
 31 #include "memory/resourceArea.hpp"
 32 #include "oops/compiledICHolder.hpp"
 33 #include "oops/instanceKlass.hpp"
 34 #include "oops/klassVtable.hpp"
 35 #include "runtime/sharedRuntime.hpp"
 36 #include "vmreg_aarch64.inline.hpp"
 37 #ifdef COMPILER2
 38 #include "opto/runtime.hpp"
 39 #endif
 40 
 41 // machine-dependent part of VtableStubs: create VtableStub of correct size and
 42 // initialize its code
 43 
 44 #define __ masm->
 45 
 46 #ifndef PRODUCT
 47 extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
 48 #endif
 49 
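     // Annotation (sketch, not part of the source): at C++ level, the stub
     // generated below is roughly equivalent to the following, using
     // hypothetical accessor names:
     //
     //   Klass*  k     = receiver->klass();                  // npe_addr: faults if receiver is NULL -> NullPointerException
     //   Method* m     = k->vtable_method_at(vtable_index);
     //   address entry = m->from_compiled_entry();           // ame_addr: faults if m is NULL -> AbstractMethodError
     //   goto entry;                                         // tail-jump; receiver stays in j_rarg0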
 50 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
 51   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
 52   const int stub_code_length = code_size_limit(true);
 53   VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
 54   // Can be NULL if there is no free space in the code cache.
 55   if (s == NULL) {
 56     return NULL;
 57   }
 58 
 59   // Count unused bytes in instruction sequences of variable size.
 60   // We add them to the computed buffer size in order to avoid
 61   // overflow in subsequently generated stubs.
 62   address   start_pc;
 63   int       slop_bytes = 0;
 64   int       slop_delta = 0;
 65 
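     // Annotation: worked example of the slop accounting, with illustrative
     // numbers. If a variable-size sequence is budgeted at 8 bytes but
     // assembles to 4, slop_delta = 8 - 4 = 4; those 4 unused bytes accumulate
     // in slop_bytes and are reported via bookkeeping(), keeping the shared
     // code_size_limit() estimate a safe upper bound for later stubs.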




 66   ResourceMark    rm;
 67   CodeBuffer      cb(s->entry_point(), stub_code_length);
 68   MacroAssembler* masm = new MacroAssembler(&cb);
 69 
 70 #if (!defined(PRODUCT) && defined(COMPILER2))
 71   if (CountCompiledCalls) {
 72     __ lea(r16, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
 73     __ increment(Address(r16));
 74   }
 75 #endif
 76 
 77   // get receiver (need to skip return address on top of stack)
 78   assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
 79 
 80   // get receiver klass
 81   address npe_addr = __ pc();
 82   __ load_klass(r16, j_rarg0);
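     // Annotation: load_klass is the first instruction to dereference the
     // receiver, so a NULL receiver faults exactly at the recorded npe_addr;
     // the signal handler turns a fault at that PC into an implicit
     // NullPointerException rather than a VM crash.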
 83 
 84 #ifndef PRODUCT
 85   if (DebugVtables) {

 99     const ptrdiff_t codesize = __ pc() - start_pc;
100     slop_delta  = (int)(estimate - codesize);  // call_VM varies in length, depending on data
101     slop_bytes += slop_delta;
102     assert(slop_delta >= 0, "vtable #%d: Code size estimate (%d) for DebugVtables too small, required: %d", vtable_index, (int)estimate, (int)codesize);
103 
104     __ leave();
105     __ bind(L);
106   }
107 #endif // PRODUCT
108 
109   start_pc = __ pc();
110   __ lookup_virtual_method(r16, vtable_index, rmethod);
111   slop_delta  = 8 - (int)(__ pc() - start_pc);
112   slop_bytes += slop_delta;
113   assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
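     // Annotation (assumed encoding): the 8-byte budget allows for at most two
     // 4-byte instructions - a single ldr when the vtable-slot offset fits the
     // scaled-immediate form, plus one extra instruction to materialize a
     // larger offset for big vtable indices.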
114 
115 #ifndef PRODUCT
116   if (DebugVtables) {
117     Label L;
118     __ cbz(rmethod, L);
119     __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
120     __ cbnz(rscratch1, L);
121     __ stop("Vtable entry is NULL");
122     __ bind(L);
123   }
124 #endif // PRODUCT
125 
126   // r16: receiver klass
127   // rmethod: Method*
128   // j_rarg0: receiver
129   address ame_addr = __ pc();
130   __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
131   __ br(rscratch1);
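     // Annotation: like npe_addr, ame_addr feeds the implicit-exception
     // tables. If the selected vtable slot holds a NULL Method* (e.g. an
     // abstract method), the ldr from rmethod at ame_addr faults and is
     // converted into an AbstractMethodError instead of a crash.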
132 
133   masm->flush();
134   bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);

135 
136   return s;
137 }
138 
139 
140 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
141   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
142   const int stub_code_length = code_size_limit(false);
143   VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
144   // Can be NULL if there is no free space in the code cache.
145   if (s == NULL) {
146     return NULL;
147   }
148 
149   // Count unused bytes in instruction sequences of variable size.
150   // We add them to the computed buffer size in order to avoid
151   // overflow in subsequently generated stubs.
152   address   start_pc;
153   int       slop_bytes = 0;
154   int       slop_delta = 0;
155 




156   ResourceMark    rm;
157   CodeBuffer      cb(s->entry_point(), stub_code_length);
158   MacroAssembler* masm = new MacroAssembler(&cb);
159 
160 #if (!defined(PRODUCT) && defined(COMPILER2))
161   if (CountCompiledCalls) {
162     __ lea(r10, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
163     __ increment(Address(r10));
164   }
165 #endif
166 
167   // get receiver (need to skip return address on top of stack)
168   assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
169 
170   // Entry arguments:
171   //  rscratch2: CompiledICHolder
172   //  j_rarg0: Receiver
173 
174   // This stub is called from compiled code which has no callee-saved registers,
175   // so all registers except arguments are free at this point.
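     // Annotation (conceptual sketch of the elided setup and the lookup below):
     // the stub scans the receiver's itable twice -
     //   1. for the resolved reference klass, purely as a subtype check; a miss
     //      branches to L_no_such_interface (IncompatibleClassChangeError),
     //   2. for the declaring (holder) klass, loading the Method* at
     //      itable_index into rmethod.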

205   // Get selected method from declaring class and itable index
206   __ lookup_interface_method(// inputs: rec. class, interface, itable index
207                              recv_klass_reg, holder_klass_reg, itable_index,
208                              // outputs: method, scan temp. reg
209                              rmethod, temp_reg,
210                              L_no_such_interface);
211 
212   const ptrdiff_t lookupSize = __ pc() - start_pc;
213 
214   // Reduce "estimate" such that "padding" does not drop below 8.
215   const ptrdiff_t estimate = 124;
216   const ptrdiff_t codesize = typecheckSize + lookupSize;
217   slop_delta  = (int)(estimate - codesize);
218   slop_bytes += slop_delta;
219   assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);
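     // Annotation: illustrative numbers for the check above - if the typecheck
     // plus lookup assemble to 112 bytes, slop_delta = 124 - 112 = 12, which
     // keeps the intended minimum of 8 bytes of padding available for indices
     // that encode less compactly.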
220 
221 #ifdef ASSERT
222   if (DebugVtables) {
223     Label L2;
224     __ cbz(rmethod, L2);
225     __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
226     __ cbnz(rscratch1, L2);
227     __ stop("compiler entrypoint is null");
228     __ bind(L2);
229   }
230 #endif // ASSERT
231 
232   // rmethod: Method*
233   // j_rarg0: receiver
234   address ame_addr = __ pc();
235   __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
236   __ br(rscratch1);
237 
238   __ bind(L_no_such_interface);
239   // Handle IncompatibleClassChangeError in itable stubs.
240   // More detailed error message.
241   // We force resolving of the call site by jumping to the "handle
242   // wrong method" stub, and so let the interpreter runtime do all the
243   // dirty work.
244   assert(SharedRuntime::get_handle_wrong_method_stub() != NULL, "check initialization order");
245   __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
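     // Annotation: the handle-wrong-method stub re-enters the runtime, which
     // re-resolves the call site; resolution then throws the
     // IncompatibleClassChangeError with a detailed message, so none of that
     // slow-path code has to be emitted in every itable stub.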
246 
247   masm->flush();
248   bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);

249 
250   return s;
251 }
252 
253 int VtableStub::pd_code_alignment() {
254   // aarch64 cache line size is not an architected constant. We just align on 4 bytes (instruction size).
255   const unsigned int icache_line_size = 4;
256   return icache_line_size;
257 }

--- Updated version: adds a caller_is_c1 parameter and selects the Method entry point accordingly ---

 30 #include "interp_masm_aarch64.hpp"
 31 #include "memory/resourceArea.hpp"
 32 #include "oops/compiledICHolder.hpp"
 33 #include "oops/instanceKlass.hpp"
 34 #include "oops/klassVtable.hpp"
 35 #include "runtime/sharedRuntime.hpp"
 36 #include "vmreg_aarch64.inline.hpp"
 37 #ifdef COMPILER2
 38 #include "opto/runtime.hpp"
 39 #endif
 40 
 41 // machine-dependent part of VtableStubs: create VtableStub of correct size and
 42 // initialize its code
 43 
 44 #define __ masm->
 45 
 46 #ifndef PRODUCT
 47 extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
 48 #endif
 49 
 50 VtableStub* VtableStubs::create_vtable_stub(int vtable_index, bool caller_is_c1) {
 51   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
 52   const int stub_code_length = code_size_limit(true);
 53   VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index, caller_is_c1);
 54   // Can be NULL if there is no free space in the code cache.
 55   if (s == NULL) {
 56     return NULL;
 57   }
 58 
 59   // Count unused bytes in instruction sequences of variable size.
 60   // We add them to the computed buffer size in order to avoid
 61   // overflow in subsequently generated stubs.
 62   address   start_pc;
 63   int       slop_bytes = 0;
 64   int       slop_delta = 0;
 65 
 66   // No variance was detected in vtable stub sizes. Setting index_dependent_slop == 0 will unveil any deviation from this observation.
 67   const int index_dependent_slop     = 0;
 68   ByteSize  entry_offset = caller_is_c1 ? Method::from_compiled_inline_offset() : Method::from_compiled_inline_ro_offset();
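     // Annotation (assumed Valhalla calling-convention semantics, worth
     // verifying against the comments on Method's entry-point fields): C1 never
     // scalarizes inline-type arguments, so C1 callers use
     // from_compiled_inline_entry; C2 callers at a virtual call site must pass
     // the receiver as a reference but scalarize the remaining inline-type
     // arguments, hence the "_ro" (receiver-only non-scalarized) entry.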
 69 
 70   ResourceMark    rm;
 71   CodeBuffer      cb(s->entry_point(), stub_code_length);
 72   MacroAssembler* masm = new MacroAssembler(&cb);
 73 
 74 #if (!defined(PRODUCT) && defined(COMPILER2))
 75   if (CountCompiledCalls) {
 76     __ lea(r16, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
 77     __ increment(Address(r16));
 78   }
 79 #endif
 80 
 81   // get receiver (need to skip return address on top of stack)
 82   assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
 83 
 84   // get receiver klass
 85   address npe_addr = __ pc();
 86   __ load_klass(r16, j_rarg0);
 87 
 88 #ifndef PRODUCT
 89   if (DebugVtables) {

103     const ptrdiff_t codesize = __ pc() - start_pc;
104     slop_delta  = (int)(estimate - codesize);  // call_VM varies in length, depending on data
105     slop_bytes += slop_delta;
106     assert(slop_delta >= 0, "vtable #%d: Code size estimate (%d) for DebugVtables too small, required: %d", vtable_index, (int)estimate, (int)codesize);
107 
108     __ leave();
109     __ bind(L);
110   }
111 #endif // PRODUCT
112 
113   start_pc = __ pc();
114   __ lookup_virtual_method(r16, vtable_index, rmethod);
115   slop_delta  = 8 - (int)(__ pc() - start_pc);
116   slop_bytes += slop_delta;
117   assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
118 
119 #ifndef PRODUCT
120   if (DebugVtables) {
121     Label L;
122     __ cbz(rmethod, L);
123     __ ldr(rscratch1, Address(rmethod, entry_offset));
124     __ cbnz(rscratch1, L);
125     __ stop("Vtable entry is NULL");
126     __ bind(L);
127   }
128 #endif // PRODUCT
129 
130   // r16: receiver klass
131   // rmethod: Method*
132   // j_rarg0: receiver
133   address ame_addr = __ pc();
134   __ ldr(rscratch1, Address(rmethod, entry_offset));
135   __ br(rscratch1);
136 
137   masm->flush();
138   slop_bytes += index_dependent_slop; // add'l slop for size variance due to large vtable offsets
139   bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, index_dependent_slop);
140 
141   return s;
142 }
143 
144 
145 VtableStub* VtableStubs::create_itable_stub(int itable_index, bool caller_is_c1) {
146   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
147   const int stub_code_length = code_size_limit(false);
148   VtableStub* s = new(stub_code_length) VtableStub(false, itable_index, caller_is_c1);
149   // Can be NULL if there is no free space in the code cache.
150   if (s == NULL) {
151     return NULL;
152   }
153 
154   // Count unused bytes in instruction sequences of variable size.
155   // We add them to the computed buffer size in order to avoid
156   // overflow in subsequently generated stubs.
157   address   start_pc;
158   int       slop_bytes = 0;
159   int       slop_delta = 0;
160 
161   const int index_dependent_slop = (itable_index == 0) ? 4 :     // index == 0 generates even shorter code.
162                                    (itable_index < 16) ? 3 : 0;  // code size change with transition from 8-bit to 32-bit constant (@index == 16).
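     // Annotation (illustrative): small indices fold into shorter instruction
     // forms when the scan constant is materialized, so stubs for index 0 come
     // out 4 bytes shorter, and indices 1..15 come out 3 bytes shorter, than
     // the worst case measured at index >= 16; index_dependent_slop repays that
     // difference so one size estimate fits every index.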
163   ByteSize  entry_offset = caller_is_c1 ? Method::from_compiled_inline_offset() : Method::from_compiled_inline_ro_offset();
164 
165   ResourceMark    rm;
166   CodeBuffer      cb(s->entry_point(), stub_code_length);
167   MacroAssembler* masm = new MacroAssembler(&cb);
168 
169 #if (!defined(PRODUCT) && defined(COMPILER2))
170   if (CountCompiledCalls) {
171     __ lea(r10, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
172     __ increment(Address(r10));
173   }
174 #endif
175 
176   // get receiver (need to skip return address on top of stack)
177   assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
178 
179   // Entry arguments:
180   //  rscratch2: CompiledICHolder
181   //  j_rarg0: Receiver
182 
183   // This stub is called from compiled code which has no callee-saved registers,
184   // so all registers except arguments are free at this point.

214   // Get selected method from declaring class and itable index
215   __ lookup_interface_method(// inputs: rec. class, interface, itable index
216                              recv_klass_reg, holder_klass_reg, itable_index,
217                              // outputs: method, scan temp. reg
218                              rmethod, temp_reg,
219                              L_no_such_interface);
220 
221   const ptrdiff_t lookupSize = __ pc() - start_pc;
222 
223   // Reduce "estimate" such that "padding" does not drop below 8.
224   const ptrdiff_t estimate = 124;
225   const ptrdiff_t codesize = typecheckSize + lookupSize;
226   slop_delta  = (int)(estimate - codesize);
227   slop_bytes += slop_delta;
228   assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);
229 
230 #ifdef ASSERT
231   if (DebugVtables) {
232     Label L2;
233     __ cbz(rmethod, L2);
234     __ ldr(rscratch1, Address(rmethod, entry_offset));
235     __ cbnz(rscratch1, L2);
236     __ stop("compiler entrypoint is null");
237     __ bind(L2);
238   }
239 #endif // ASSERT
240 
241   // rmethod: Method*
242   // j_rarg0: receiver
243   address ame_addr = __ pc();
244   __ ldr(rscratch1, Address(rmethod, entry_offset));
245   __ br(rscratch1);
246 
247   __ bind(L_no_such_interface);
248   // Handle IncompatibleClassChangeError in itable stubs.
249   // More detailed error message.
250   // We force resolving of the call site by jumping to the "handle
251   // wrong method" stub, and so let the interpreter runtime do all the
252   // dirty work.
253   assert(SharedRuntime::get_handle_wrong_method_stub() != NULL, "check initialization order");
254   __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
255 
256   masm->flush();
257   slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets
258   bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, index_dependent_slop);
259 
260   return s;
261 }
262 
263 int VtableStub::pd_code_alignment() {
264   // aarch64 cache line size is not an architected constant. We just align on 4 bytes (instruction size).
265   const unsigned int icache_line_size = 4;
266   return icache_line_size;
267 }
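     // Annotation (illustrative): with pd_code_alignment() == 4, stubs are
     // packed at instruction granularity - e.g. a 44-byte stub is followed by
     // the next one at offset 44, with no inter-stub padding, since any emitted
     // aarch64 code size is already a multiple of 4.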