src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp

 30 #include "interp_masm_aarch64.hpp"
 31 #include "memory/resourceArea.hpp"
 32 #include "oops/compiledICHolder.hpp"
 33 #include "oops/instanceKlass.hpp"
 34 #include "oops/klassVtable.hpp"
 35 #include "runtime/sharedRuntime.hpp"
 36 #include "vmreg_aarch64.inline.hpp"
 37 #ifdef COMPILER2
 38 #include "opto/runtime.hpp"
 39 #endif
 40 
 41 // machine-dependent part of VtableStubs: create VtableStub of correct size and
 42 // initialize its code
 43 
 44 #define __ masm->
 45 
 46 #ifndef PRODUCT
 47 extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
 48 #endif
 49 
 50 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
 51   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
 52   const int stub_code_length = code_size_limit(true);
 53   VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
 54   // Can be null if there is no free space in the code cache.
 55   if (s == nullptr) {
 56     return nullptr;
 57   }
 58 
 59   // Count unused bytes in instruction sequences of variable size.
 60   // We add them to the computed buffer size in order to avoid
 61   // overflow in subsequently generated stubs.
 62   address   start_pc;
 63   int       slop_bytes = 0;
 64   int       slop_delta = 0;
 65 
 66   ResourceMark    rm;
 67   CodeBuffer      cb(s->entry_point(), stub_code_length);
 68   MacroAssembler* masm = new MacroAssembler(&cb);
 69 
 70 #if (!defined(PRODUCT) && defined(COMPILER2))
 71   if (CountCompiledCalls) {
 72     __ lea(r16, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
 73     __ increment(Address(r16));
 74   }
 75 #endif
 76 
 77   // get receiver (need to skip return address on top of stack)
 78   assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
 79 
 80   // get receiver klass
 81   address npe_addr = __ pc();
 82   __ load_klass(r16, j_rarg0);
 83 
 84 #ifndef PRODUCT
 85   if (DebugVtables) {

 99     const ptrdiff_t codesize = __ pc() - start_pc;
100     slop_delta  = estimate - codesize;  // call_VM varies in length, depending on data
101     slop_bytes += slop_delta;
102     assert(slop_delta >= 0, "vtable #%d: Code size estimate (%d) for DebugVtables too small, required: %d", vtable_index, (int)estimate, (int)codesize);
103 
104     __ leave();
105     __ bind(L);
106   }
107 #endif // PRODUCT
108 
109   start_pc = __ pc();
110   __ lookup_virtual_method(r16, vtable_index, rmethod);
111   slop_delta  = 8 - (int)(__ pc() - start_pc);
112   slop_bytes += slop_delta;
113   assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
114 
115 #ifndef PRODUCT
116   if (DebugVtables) {
117     Label L;
118     __ cbz(rmethod, L);
119     __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
120     __ cbnz(rscratch1, L);
121     __ stop("Vtable entry is null");
122     __ bind(L);
123   }
124 #endif // PRODUCT
125 
126   // r0: receiver klass
127   // rmethod: Method*
128   // r2: receiver
129   address ame_addr = __ pc();
130   __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
131   __ br(rscratch1);
132 
133   masm->flush();
134   bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);
135 
136   return s;
137 }
138 
139 
140 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
141   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
142   const int stub_code_length = code_size_limit(false);
143   VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
144   // Can be null if there is no free space in the code cache.
145   if (s == nullptr) {
146     return nullptr;
147   }
148 
149   // Count unused bytes in instruction sequences of variable size.
150   // We add them to the computed buffer size in order to avoid
151   // overflow in subsequently generated stubs.
152   address   start_pc;
153   int       slop_bytes = 0;
154   int       slop_delta = 0;
155 
156   ResourceMark    rm;
157   CodeBuffer      cb(s->entry_point(), stub_code_length);
158   MacroAssembler* masm = new MacroAssembler(&cb);
159 
160 #if (!defined(PRODUCT) && defined(COMPILER2))
161   if (CountCompiledCalls) {
162     __ lea(r10, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
163     __ increment(Address(r10));
164   }
165 #endif
166 
167   // get receiver (need to skip return address on top of stack)
168   assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
169 
170   // Entry arguments:
171   //  rscratch2: CompiledICHolder
172   //  j_rarg0: Receiver
173 
174   // This stub is called from compiled code which has no callee-saved registers,
175   // so all registers except arguments are free at this point.

190   // get receiver klass (also an implicit null-check)
191   address npe_addr = __ pc();
192   __ load_klass(recv_klass_reg, j_rarg0);
193 
194   // Receiver subtype check against REFC.
195   // Get selected method from declaring class and itable index
196   __ lookup_interface_method_stub(recv_klass_reg, holder_klass_reg, resolved_klass_reg, rmethod,
197                                   temp_reg, temp_reg2, itable_index, L_no_such_interface);
198 
199   // Reduce "estimate" such that "padding" does not drop below 8.
200   const ptrdiff_t estimate = 124;
201   const ptrdiff_t codesize = __ pc() - start_pc;
202   slop_delta  = (int)(estimate - codesize);
203   slop_bytes += slop_delta;
204   assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);
205 
206 #ifdef ASSERT
207   if (DebugVtables) {
208     Label L2;
209     __ cbz(rmethod, L2);
210     __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
211     __ cbnz(rscratch1, L2);
212     __ stop("compiler entrypoint is null");
213     __ bind(L2);
214   }
215 #endif // ASSERT
216 
217   // rmethod: Method*
218   // j_rarg0: receiver
219   address ame_addr = __ pc();
220   __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
221   __ br(rscratch1);
222 
223   __ bind(L_no_such_interface);
224   // Handle IncompatibleClassChangeError in itable stubs.
225   // More detailed error message.
226   // We force resolving of the call site by jumping to the "handle
227   // wrong method" stub, and so let the interpreter runtime do all the
228   // dirty work.
229   assert(SharedRuntime::get_handle_wrong_method_stub() != nullptr, "check initialization order");
230   __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
231 
232   masm->flush();
233   bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);
234 
235   return s;
236 }
237 
238 int VtableStub::pd_code_alignment() {
239   // aarch64 cache line size is not an architected constant. We just align on 4 bytes (instruction size).
240   const unsigned int icache_line_size = 4;
241   return icache_line_size;
242 }

src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp (updated version, with caller_is_c1)

 30 #include "interp_masm_aarch64.hpp"
 31 #include "memory/resourceArea.hpp"
 32 #include "oops/compiledICHolder.hpp"
 33 #include "oops/instanceKlass.hpp"
 34 #include "oops/klassVtable.hpp"
 35 #include "runtime/sharedRuntime.hpp"
 36 #include "vmreg_aarch64.inline.hpp"
 37 #ifdef COMPILER2
 38 #include "opto/runtime.hpp"
 39 #endif
 40 
 41 // machine-dependent part of VtableStubs: create VtableStub of correct size and
 42 // initialize its code
 43 
 44 #define __ masm->
 45 
 46 #ifndef PRODUCT
 47 extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
 48 #endif
 49 
 50 VtableStub* VtableStubs::create_vtable_stub(int vtable_index, bool caller_is_c1) {
 51   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
 52   const int stub_code_length = code_size_limit(true);
 53   VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index, caller_is_c1);
 54   // Can be null if there is no free space in the code cache.
 55   if (s == nullptr) {
 56     return nullptr;
 57   }
 58 
 59   // Count unused bytes in instruction sequences of variable size.
 60   // We add them to the computed buffer size in order to avoid
 61   // overflow in subsequently generated stubs.
 62   address   start_pc;
 63   int       slop_bytes = 0;
 64   int       slop_delta = 0;
 65 
 66   // No variance was detected in vtable stub sizes. Setting index_dependent_slop == 0 will unveil any deviation from this observation.
 67   const int index_dependent_slop     = 0;
 68   ByteSize  entry_offset = caller_is_c1 ? Method::from_compiled_inline_offset() :  Method::from_compiled_inline_ro_offset();
 69 
 70   ResourceMark    rm;
 71   CodeBuffer      cb(s->entry_point(), stub_code_length);
 72   MacroAssembler* masm = new MacroAssembler(&cb);
 73 
 74 #if (!defined(PRODUCT) && defined(COMPILER2))
 75   if (CountCompiledCalls) {
 76     __ lea(r16, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
 77     __ increment(Address(r16));
 78   }
 79 #endif
 80 
 81   // get receiver (need to skip return address on top of stack)
 82   assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
 83 
 84   // get receiver klass
 85   address npe_addr = __ pc();
 86   __ load_klass(r16, j_rarg0);
 87 
 88 #ifndef PRODUCT
 89   if (DebugVtables) {

103     const ptrdiff_t codesize = __ pc() - start_pc;
104     slop_delta  = estimate - codesize;  // call_VM varies in length, depending on data
105     slop_bytes += slop_delta;
106     assert(slop_delta >= 0, "vtable #%d: Code size estimate (%d) for DebugVtables too small, required: %d", vtable_index, (int)estimate, (int)codesize);
107 
108     __ leave();
109     __ bind(L);
110   }
111 #endif // PRODUCT
112 
113   start_pc = __ pc();
114   __ lookup_virtual_method(r16, vtable_index, rmethod);
115   slop_delta  = 8 - (int)(__ pc() - start_pc);
116   slop_bytes += slop_delta;
117   assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
118 
119 #ifndef PRODUCT
120   if (DebugVtables) {
121     Label L;
122     __ cbz(rmethod, L);
123     __ ldr(rscratch1, Address(rmethod, entry_offset));
124     __ cbnz(rscratch1, L);
125     __ stop("Vtable entry is null");
126     __ bind(L);
127   }
128 #endif // PRODUCT
129 
130   // r0: receiver klass
131   // rmethod: Method*
132   // r2: receiver
133   address ame_addr = __ pc();
134   __ ldr(rscratch1, Address(rmethod, entry_offset));
135   __ br(rscratch1);
136 
137   masm->flush();
138   slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets
139   bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, index_dependent_slop);
140 
141   return s;
142 }
143 
144 
145 VtableStub* VtableStubs::create_itable_stub(int itable_index, bool caller_is_c1) {
146   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
147   const int stub_code_length = code_size_limit(false);
148   VtableStub* s = new(stub_code_length) VtableStub(false, itable_index, caller_is_c1);
149   // Can be null if there is no free space in the code cache.
150   if (s == nullptr) {
151     return nullptr;
152   }
153 
154   // Count unused bytes in instruction sequences of variable size.
155   // We add them to the computed buffer size in order to avoid
156   // overflow in subsequently generated stubs.
157   address   start_pc;
158   int       slop_bytes = 0;
159   int       slop_delta = 0;
160 
161   const int index_dependent_slop = (itable_index == 0) ? 4 :     // code size change with transition from 8-bit to 32-bit constant (@index == 16).
162                                    (itable_index < 16) ? 3 : 0;  // index == 0 generates even shorter code.
163   ByteSize  entry_offset = caller_is_c1 ? Method::from_compiled_inline_offset() :  Method::from_compiled_inline_ro_offset();
164 
165   ResourceMark    rm;
166   CodeBuffer      cb(s->entry_point(), stub_code_length);
167   MacroAssembler* masm = new MacroAssembler(&cb);
168 
169 #if (!defined(PRODUCT) && defined(COMPILER2))
170   if (CountCompiledCalls) {
171     __ lea(r10, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
172     __ increment(Address(r10));
173   }
174 #endif
175 
176   // get receiver (need to skip return address on top of stack)
177   assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
178 
179   // Entry arguments:
180   //  rscratch2: CompiledICHolder
181   //  j_rarg0: Receiver
182 
183   // This stub is called from compiled code which has no callee-saved registers,
184   // so all registers except arguments are free at this point.

199   // get receiver klass (also an implicit null-check)
200   address npe_addr = __ pc();
201   __ load_klass(recv_klass_reg, j_rarg0);
202 
203   // Receiver subtype check against REFC.
204   // Get selected method from declaring class and itable index
205   __ lookup_interface_method_stub(recv_klass_reg, holder_klass_reg, resolved_klass_reg, rmethod,
206                                   temp_reg, temp_reg2, itable_index, L_no_such_interface);
207 
208   // Reduce "estimate" such that "padding" does not drop below 8.
209   const ptrdiff_t estimate = 124;
210   const ptrdiff_t codesize = __ pc() - start_pc;
211   slop_delta  = (int)(estimate - codesize);
212   slop_bytes += slop_delta;
213   assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);
214 
215 #ifdef ASSERT
216   if (DebugVtables) {
217     Label L2;
218     __ cbz(rmethod, L2);
219     __ ldr(rscratch1, Address(rmethod, entry_offset));
220     __ cbnz(rscratch1, L2);
221     __ stop("compiler entrypoint is null");
222     __ bind(L2);
223   }
224 #endif // ASSERT
225 
226   // rmethod: Method*
227   // j_rarg0: receiver
228   address ame_addr = __ pc();
229   __ ldr(rscratch1, Address(rmethod, entry_offset));
230   __ br(rscratch1);
231 
232   __ bind(L_no_such_interface);
233   // Handle IncompatibleClassChangeError in itable stubs.
234   // More detailed error message.
235   // We force resolving of the call site by jumping to the "handle
236   // wrong method" stub, and so let the interpreter runtime do all the
237   // dirty work.
238   assert(SharedRuntime::get_handle_wrong_method_stub() != nullptr, "check initialization order");
239   __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
240 
241   masm->flush();
242   slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets
243   bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, index_dependent_slop);
244 
245   return s;
246 }
247 
248 int VtableStub::pd_code_alignment() {
249   // aarch64 cache line size is not an architected constant. We just align on 4 bytes (instruction size).
250   const unsigned int icache_line_size = 4;
251   return icache_line_size;
252 }
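
Both versions of these stubs account for "slop": each variable-size instruction sequence is emitted, its actual size is compared against a fixed estimate, and the unused bytes are accumulated and reported to bookkeeping() so later stub-size estimates do not overflow. The following is a minimal, self-contained sketch of that accounting pattern only; SlopTracker, record(), and the byte counts are hypothetical and are not part of the HotSpot sources, where the emitted size comes from __ pc() - start_pc and the total is passed to bookkeeping(...).

#include <cassert>
#include <cstdio>

struct SlopTracker {
  int slop_bytes = 0;   // accumulated unused bytes across variable-size sequences

  // Record one variable-size instruction sequence against its size estimate.
  // Returns the leftover bytes; the assert fires if the estimate was too small.
  int record(int estimate, int emitted) {
    int slop_delta = estimate - emitted;
    assert(slop_delta >= 0 && "code size estimate too small, adjust it");
    slop_bytes += slop_delta;
    return slop_delta;
  }
};

int main() {
  SlopTracker tracker;
  // Hypothetical sizes: a lookup sequence estimated at 8 bytes that emitted 4,
  // and a larger block estimated at 124 bytes that emitted 120.
  tracker.record(8, 4);
  tracker.record(124, 120);
  std::printf("total slop: %d bytes\n", tracker.slop_bytes);  // prints "total slop: 8 bytes"
  return 0;
}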