src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp

 30 #include "code/vtableStubs.hpp"
 31 #include "interp_masm_aarch64.hpp"
 32 #include "memory/resourceArea.hpp"
 33 #include "oops/instanceKlass.hpp"
 34 #include "oops/klassVtable.hpp"
 35 #include "runtime/sharedRuntime.hpp"
 36 #include "vmreg_aarch64.inline.hpp"
 37 #ifdef COMPILER2
 38 #include "opto/runtime.hpp"
 39 #endif
 40 
 41 // machine-dependent part of VtableStubs: create VtableStub of correct size and
 42 // initialize its code
 43 
 44 #define __ masm->
 45 
 46 #ifndef PRODUCT
 47 extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
 48 #endif
 49 
 50 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
 51   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
 52   const int stub_code_length = code_size_limit(true);
 53   VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
 54   // Can be null if there is no free space in the code cache.
 55   if (s == nullptr) {
 56     return nullptr;
 57   }
 58 
 59   // Count unused bytes in instruction sequences of variable size.
 60   // We add them to the computed buffer size in order to avoid
 61   // overflow in subsequently generated stubs.
 62   address   start_pc;
 63   int       slop_bytes = 0;
 64   int       slop_delta = 0;
 65 


 66   ResourceMark    rm;
 67   CodeBuffer      cb(s->entry_point(), stub_code_length);
 68   MacroAssembler* masm = new MacroAssembler(&cb);
 69 
 70 #if (!defined(PRODUCT) && defined(COMPILER2))
 71   if (CountCompiledCalls) {
 72     __ lea(r16, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
 73     __ increment(Address(r16));
 74   }
 75 #endif
 76 
 77   // get receiver (need to skip return address on top of stack)
 78   assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
 79 
 80   // get receiver klass
 81   address npe_addr = __ pc();
 82   __ load_klass(r16, j_rarg0);
 83 
 84 #ifndef PRODUCT
 85   if (DebugVtables) {

 99     const ptrdiff_t codesize = __ pc() - start_pc;
100     slop_delta  = (int)(estimate - codesize);  // call_VM varies in length, depending on data
101     slop_bytes += slop_delta;
102     assert(slop_delta >= 0, "vtable #%d: Code size estimate (%d) for DebugVtables too small, required: %d", vtable_index, (int)estimate, (int)codesize);
103 
104     __ leave();
105     __ bind(L);
106   }
107 #endif // PRODUCT
108 
109   start_pc = __ pc();
110   __ lookup_virtual_method(r16, vtable_index, rmethod);
111   slop_delta  = 8 - (int)(__ pc() - start_pc);
112   slop_bytes += slop_delta;
113   assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
114 
115 #ifndef PRODUCT
116   if (DebugVtables) {
117     Label L;
118     __ cbz(rmethod, L);
119     __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
120     __ cbnz(rscratch1, L);
121     __ stop("Vtable entry is null");
122     __ bind(L);
123   }
124 #endif // PRODUCT
125 
126   // r16: receiver klass
127   // rmethod: Method*
128   // j_rarg0: receiver
129   address ame_addr = __ pc();
130   __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
131   __ br(rscratch1);
132 
133   masm->flush();
134   bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);
135 
136   return s;
137 }
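
The slop accounting in the generator above follows one pattern throughout: each variable-length instruction sequence is bracketed by pc() measurements, the surplus relative to a fixed worst-case estimate accumulates in slop_bytes, and bookkeeping() reports the total so the sizing described in share/code/vtableStubs.hpp can be kept tight. A minimal, standalone model of that accounting (SlopTracker and the example byte counts are invented for illustration; the real generator works on plain ints inline):

    #include <cassert>

    // Standalone model of the slop-byte accounting: each measured
    // sequence is charged against a worst-case estimate, and the
    // unused bytes accumulate for later reporting.
    struct SlopTracker {
      int slop_bytes = 0;
      void record(int estimate, int codesize) {
        int slop_delta = estimate - codesize;  // unused bytes in this sequence
        assert(slop_delta >= 0 && "code size estimate too small");
        slop_bytes += slop_delta;              // reported via bookkeeping()
      }
    };

    int main() {
      SlopTracker t;
      t.record(/*estimate=*/8,   /*codesize=*/4);    // e.g. lookup_virtual_method
      t.record(/*estimate=*/256, /*codesize=*/200);  // e.g. a DebugVtables block
      return t.slop_bytes == 60 ? 0 : 1;             // 4 + 56 unused bytes
    }
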
138 
139 
140 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
141   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
142   const int stub_code_length = code_size_limit(false);
143   VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
144   // Can be null if there is no free space in the code cache.
145   if (s == nullptr) {
146     return nullptr;
147   }
148 
149   // Count unused bytes in instruction sequences of variable size.
150   // We add them to the computed buffer size in order to avoid
151   // overflow in subsequently generated stubs.
152   address   start_pc;
153   int       slop_bytes = 0;
154   int       slop_delta = 0;
155 


156   ResourceMark    rm;
157   CodeBuffer      cb(s->entry_point(), stub_code_length);
158   MacroAssembler* masm = new MacroAssembler(&cb);
159 
160 #if (!defined(PRODUCT) && defined(COMPILER2))
161   if (CountCompiledCalls) {
162     __ lea(r10, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
163     __ increment(Address(r10));
164   }
165 #endif
166 
167   // get receiver (need to skip return address on top of stack)
168   assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
169 
170   // Entry arguments:
171   //  rscratch2: CompiledICData
172   //  j_rarg0: Receiver
173 
174   // This stub is called from compiled code which has no callee-saved registers,
175   // so all registers except arguments are free at this point.

190   // get receiver klass (also an implicit null-check)
191   address npe_addr = __ pc();
192   __ load_klass(recv_klass_reg, j_rarg0);
193 
194   // Receiver subtype check against REFC (the resolved/referenced interface class).
195   // Get the selected method from the declaring class and the itable index.
196   __ lookup_interface_method_stub(recv_klass_reg, holder_klass_reg, resolved_klass_reg, rmethod,
197                                   temp_reg, temp_reg2, itable_index, L_no_such_interface);
198 
199   // Reduce "estimate" such that "padding" does not drop below 8.
200   const ptrdiff_t estimate = AOTCodeCache::is_on_for_dump() ? 148 : 144;
201   const ptrdiff_t codesize = __ pc() - start_pc;
202   slop_delta  = (int)(estimate - codesize);
203   slop_bytes += slop_delta;
204   assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);
205 
206 #ifdef ASSERT
207   if (DebugVtables) {
208     Label L2;
209     __ cbz(rmethod, L2);
210     __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
211     __ cbnz(rscratch1, L2);
212     __ stop("compiler entrypoint is null");
213     __ bind(L2);
214   }
215 #endif // ASSERT
216 
217   // rmethod: Method*
218   // j_rarg0: receiver
219   address ame_addr = __ pc();
220   __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
221   __ br(rscratch1);
222 
223   __ bind(L_no_such_interface);
224   // Handle IncompatibleClassChangeError in itable stubs with a
225   // more detailed error message.
226   // We force re-resolution of the call site by jumping to the
227   // "handle wrong method" stub, and so let the interpreter runtime
228   // do all the dirty work.
229   assert(SharedRuntime::get_handle_wrong_method_stub() != nullptr, "check initialization order");
230   __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
231 
232   masm->flush();
233   bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);
234 
235   return s;
236 }
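
Both generators hand two addresses to bookkeeping(): npe_addr, the pc of the receiver klass load, where a null receiver takes the implicit null check, and ame_addr, the pc of the compiled-entry load, where a missing method surfaces. The VM's fault handling maps a faulting pc back to the stub and raises NullPointerException or AbstractMethodError accordingly. A standalone model of that classification (the types and function here are invented; the real mapping lives in the shared runtime):

    #include <cstdint>

    // Hypothetical model: classify a faulting pc inside a stub.
    enum class StubFault { none, null_pointer, abstract_method };

    struct StubRange {
      uintptr_t npe_addr;  // pc of the receiver klass load
      uintptr_t ame_addr;  // pc of the compiled-entry load
    };

    StubFault classify_fault(const StubRange& s, uintptr_t faulting_pc) {
      if (faulting_pc == s.npe_addr) return StubFault::null_pointer;    // null receiver
      if (faulting_pc == s.ame_addr) return StubFault::abstract_method; // null entry
      return StubFault::none;
    }
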
237 
238 int VtableStub::pd_code_alignment() {
239   // aarch64 cache line size is not an architected constant. We just align on 4 bytes (instruction size).
240   const unsigned int icache_line_size = 4;

 30 #include "code/vtableStubs.hpp"
 31 #include "interp_masm_aarch64.hpp"
 32 #include "memory/resourceArea.hpp"
 33 #include "oops/instanceKlass.hpp"
 34 #include "oops/klassVtable.hpp"
 35 #include "runtime/sharedRuntime.hpp"
 36 #include "vmreg_aarch64.inline.hpp"
 37 #ifdef COMPILER2
 38 #include "opto/runtime.hpp"
 39 #endif
 40 
 41 // machine-dependent part of VtableStubs: create VtableStub of correct size and
 42 // initialize its code
 43 
 44 #define __ masm->
 45 
 46 #ifndef PRODUCT
 47 extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
 48 #endif
 49 
 50 VtableStub* VtableStubs::create_vtable_stub(int vtable_index, bool caller_is_c1) {
 51   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
 52   const int stub_code_length = code_size_limit(true);
 53   VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index, caller_is_c1);
 54   // Can be null if there is no free space in the code cache.
 55   if (s == nullptr) {
 56     return nullptr;
 57   }
 58 
 59   // Count unused bytes in instruction sequences of variable size.
 60   // We add them to the computed buffer size in order to avoid
 61   // overflow in subsequently generated stubs.
 62   address   start_pc;
 63   int       slop_bytes = 0;
 64   int       slop_delta = 0;
 65 
 66   ByteSize  entry_offset = caller_is_c1 ? Method::from_compiled_inline_offset() : Method::from_compiled_inline_ro_offset();
 67 
 68   ResourceMark    rm;
 69   CodeBuffer      cb(s->entry_point(), stub_code_length);
 70   MacroAssembler* masm = new MacroAssembler(&cb);
 71 
 72 #if (!defined(PRODUCT) && defined(COMPILER2))
 73   if (CountCompiledCalls) {
 74     __ lea(r16, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
 75     __ increment(Address(r16));
 76   }
 77 #endif
 78 
 79   // get receiver (need to skip return address on top of stack)
 80   assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
 81 
 82   // get receiver klass
 83   address npe_addr = __ pc();
 84   __ load_klass(r16, j_rarg0);
 85 
 86 #ifndef PRODUCT
 87   if (DebugVtables) {

101     const ptrdiff_t codesize = __ pc() - start_pc;
102     slop_delta  = (int)(estimate - codesize);  // call_VM varies in length, depending on data
103     slop_bytes += slop_delta;
104     assert(slop_delta >= 0, "vtable #%d: Code size estimate (%d) for DebugVtables too small, required: %d", vtable_index, (int)estimate, (int)codesize);
105 
106     __ leave();
107     __ bind(L);
108   }
109 #endif // PRODUCT
110 
111   start_pc = __ pc();
112   __ lookup_virtual_method(r16, vtable_index, rmethod);
113   slop_delta  = 8 - (int)(__ pc() - start_pc);
114   slop_bytes += slop_delta;
115   assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
116 
117 #ifndef PRODUCT
118   if (DebugVtables) {
119     Label L;
120     __ cbz(rmethod, L);
121     __ ldr(rscratch1, Address(rmethod, entry_offset));
122     __ cbnz(rscratch1, L);
123     __ stop("Vtable entry is null");
124     __ bind(L);
125   }
126 #endif // PRODUCT
127 
128   // r16: receiver klass
129   // rmethod: Method*
130   // j_rarg0: receiver
131   address ame_addr = __ pc();
132   __ ldr(rscratch1, Address(rmethod, entry_offset));
133   __ br(rscratch1);
134 
135   masm->flush();
136   bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);
137 
138   return s;
139 }
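
Relative to the version above, the dispatch change is the entry_offset selection: instead of always loading Method::from_compiled_offset(), the stub dispatches through Method::from_compiled_inline_offset() when the caller is C1 and Method::from_compiled_inline_ro_offset() otherwise. The reading here, that C1 callers pass inline-type arguments by reference while other compiled callers scalarize them except for the receiver, is an assumption based on the offset names, not something stated in this file. A standalone model of the selection (struct and field names invented; only the two offsets come from the source):

    // Hypothetical model of per-caller entry selection.
    struct MethodEntries {
      void* from_compiled_inline;     // assumed: inline-type args passed by reference
      void* from_compiled_inline_ro;  // assumed: args scalarized, receiver excepted
    };

    void* select_entry(const MethodEntries& m, bool caller_is_c1) {
      return caller_is_c1 ? m.from_compiled_inline : m.from_compiled_inline_ro;
    }
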
140 
141 
142 VtableStub* VtableStubs::create_itable_stub(int itable_index, bool caller_is_c1) {
143   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
144   const int stub_code_length = code_size_limit(false);
145   VtableStub* s = new(stub_code_length) VtableStub(false, itable_index, caller_is_c1);
146   // Can be null if there is no free space in the code cache.
147   if (s == nullptr) {
148     return nullptr;
149   }
150 
151   // Count unused bytes in instruction sequences of variable size.
152   // We add them to the computed buffer size in order to avoid
153   // overflow in subsequently generated stubs.
154   address   start_pc;
155   int       slop_bytes = 0;
156   int       slop_delta = 0;
157 
158   ByteSize  entry_offset = caller_is_c1 ? Method::from_compiled_inline_offset() : Method::from_compiled_inline_ro_offset();
159 
160   ResourceMark    rm;
161   CodeBuffer      cb(s->entry_point(), stub_code_length);
162   MacroAssembler* masm = new MacroAssembler(&cb);
163 
164 #if (!defined(PRODUCT) && defined(COMPILER2))
165   if (CountCompiledCalls) {
166     __ lea(r10, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
167     __ increment(Address(r10));
168   }
169 #endif
170 
171   // get receiver (need to skip return address on top of stack)
172   assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
173 
174   // Entry arguments:
175   //  rscratch2: CompiledICData
176   //  j_rarg0: Receiver
177 
178   // This stub is called from compiled code which has no callee-saved registers,
179   // so all registers except arguments are free at this point.

194   // get receiver klass (also an implicit null-check)
195   address npe_addr = __ pc();
196   __ load_klass(recv_klass_reg, j_rarg0);
197 
198   // Receiver subtype check against REFC (the resolved/referenced interface class).
199   // Get the selected method from the declaring class and the itable index.
200   __ lookup_interface_method_stub(recv_klass_reg, holder_klass_reg, resolved_klass_reg, rmethod,
201                                   temp_reg, temp_reg2, itable_index, L_no_such_interface);
202 
203   // Reduce "estimate" such that "padding" does not drop below 8.
204   const ptrdiff_t estimate = AOTCodeCache::is_on_for_dump() ? 148 : 144;
205   const ptrdiff_t codesize = __ pc() - start_pc;
206   slop_delta  = (int)(estimate - codesize);
207   slop_bytes += slop_delta;
208   assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);
209 
210 #ifdef ASSERT
211   if (DebugVtables) {
212     Label L2;
213     __ cbz(rmethod, L2);
214     __ ldr(rscratch1, Address(rmethod, entry_offset));
215     __ cbnz(rscratch1, L2);
216     __ stop("compiler entrypoint is null");
217     __ bind(L2);
218   }
219 #endif // ASSERT
220 
221   // rmethod: Method*
222   // j_rarg0: receiver
223   address ame_addr = __ pc();
224   __ ldr(rscratch1, Address(rmethod, entry_offset));
225   __ br(rscratch1);
226 
227   __ bind(L_no_such_interface);
228   // Handle IncompatibleClassChangeError in itable stubs with a
229   // more detailed error message.
230   // We force re-resolution of the call site by jumping to the
231   // "handle wrong method" stub, and so let the interpreter runtime
232   // do all the dirty work.
233   assert(SharedRuntime::get_handle_wrong_method_stub() != nullptr, "check initialization order");
234   __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
235 
236   masm->flush();
237   bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);
238 
239   return s;
240 }
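
On an itable miss the stub does not raise IncompatibleClassChangeError inline; it tail-jumps to the shared "handle wrong method" stub so the runtime can re-resolve the call site and produce the detailed message. A minimal model of that fast-path-or-throwing-slow-path shape (all names here are invented):

    #include <stdexcept>

    using Target = void (*)();

    // Hypothetical stand-in for the "handle wrong method" stub: the
    // real runtime re-resolves the call site; this model just throws.
    Target resolve_slowly_or_throw() {
      throw std::runtime_error("IncompatibleClassChangeError: receiver does not implement the interface");
    }

    Target dispatch(Target fast_target, bool interface_found) {
      return interface_found ? fast_target : resolve_slowly_or_throw();
    }
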
241 
242 int VtableStub::pd_code_alignment() {
243   // aarch64 cache line size is not an architected constant. We just align on 4 bytes (instruction size).
244   const unsigned int icache_line_size = 4;
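
pd_code_alignment() feeds the shared stub allocator, which rounds each stub's entry point up to this value; since every AArch64 instruction is 4 bytes and the cache line size is not an architected constant, instruction-size alignment is enough. A standalone sketch of the round-up (align_up is written out here; HotSpot has its own utility for this):

    #include <cstdint>
    #include <cassert>

    // Round addr up to the next multiple of a power-of-two alignment,
    // as the allocator does with the value returned above.
    uintptr_t align_up(uintptr_t addr, uintptr_t alignment) {
      assert((alignment & (alignment - 1)) == 0);    // power of two
      return (addr + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      return align_up(0x1002, 4) == 0x1004 ? 0 : 1;  // 4-byte alignment
    }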