#include "code/vtableStubs.hpp"
#include "interp_masm_aarch64.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// machine-dependent part of VtableStubs: create VtableStub of correct size and
// initialize its code

#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
#endif

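// A vtable stub is the shared target of megamorphic virtual call sites: it
// loads the receiver's klass, fetches the Method* stored in the vtable slot
// for the fixed vtable_index, and jumps to that method's compiled entry point.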
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(true);
  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
  // Can be null if there is no free space in the code cache.
  if (s == nullptr) {
    return nullptr;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address start_pc;
  int slop_bytes = 0;
  int slop_delta = 0;

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    __ lea(r16, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
    __ increment(Address(r16));
  }
#endif

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

  // get receiver klass
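  // The klass load below doubles as the implicit null check of the receiver:
  // npe_addr is recorded so a fault at this PC is turned into the expected
  // NullPointerException.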
  address npe_addr = __ pc();
  __ load_klass(r16, j_rarg0);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // TODO: find upper bound for this debug code.
    start_pc = __ pc();

    // check offset vs vtable length
    __ ldrw(rscratch1, Address(r16, Klass::vtable_length_offset()));
    __ cmpw(rscratch1, vtable_index * vtableEntry::size());
    __ br(Assembler::GT, L);
    __ enter();
    __ mov(r2, vtable_index);

    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), j_rarg0, r2);
    const ptrdiff_t estimate = 256;
    const ptrdiff_t codesize = __ pc() - start_pc;
    slop_delta  = estimate - codesize;  // call_VM varies in length, depending on data
    slop_bytes += slop_delta;
    assert(slop_delta >= 0, "vtable #%d: Code size estimate (%d) for DebugVtables too small, required: %d", vtable_index, (int)estimate, (int)codesize);

    __ leave();
    __ bind(L);
  }
#endif // PRODUCT

  start_pc = __ pc();
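  // Load the Method* from the receiver klass' vtable slot at the compile-time
  // constant vtable_index.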
  __ lookup_virtual_method(r16, vtable_index, rmethod);
  slop_delta = 8 - (int)(__ pc() - start_pc);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    __ cbz(rmethod, L);
    __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
    __ cbnz(rscratch1, L);
    __ stop("Vtable entry is null");
    __ bind(L);
  }
#endif // PRODUCT

  // r16: receiver klass
  // rmethod: Method*
  // j_rarg0: receiver
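  // If the selected vtable slot holds no usable method (abstract method case),
  // the memory access below faults; ame_addr is recorded so the runtime can
  // raise the AbstractMethodError for it.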
  address ame_addr = __ pc();
  __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
  __ br(rscratch1);

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);

  return s;
}

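// An itable stub is the shared target of megamorphic interface call sites: it
// verifies that the receiver class implements the resolved interface (REFC)
// and then loads the selected Method* from the itable section of the
// declaring interface (DEFC) before jumping to its compiled entry point.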
VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(false);
  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
  // Can be null if there is no free space in the code cache.
  if (s == nullptr) {
    return nullptr;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address start_pc;
  int slop_bytes = 0;
  int slop_delta = 0;

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    __ lea(r10, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
    __ increment(Address(r10));
  }
#endif

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

  // Entry arguments:
  //  rscratch2: CompiledICData
  //  j_rarg0: Receiver

  // This stub is called from compiled code which has no callee-saved registers,
  // so all registers except arguments are free at this point.
  const Register recv_klass_reg     = r10;
  const Register holder_klass_reg   = r16; // declaring interface klass (DEFC)
  const Register resolved_klass_reg = r14; // resolved interface klass (REFC)
  const Register temp_reg           = r11;
  const Register temp_reg2          = r15;
  const Register icdata_reg         = rscratch2;

  Label L_no_such_interface;

  __ ldr(resolved_klass_reg, Address(icdata_reg, CompiledICData::itable_refc_klass_offset()));
  __ ldr(holder_klass_reg,   Address(icdata_reg, CompiledICData::itable_defc_klass_offset()));

  start_pc = __ pc();

  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();
  __ load_klass(recv_klass_reg, j_rarg0);

  // Receiver subtype check against REFC.
  // Get selected method from declaring class and itable index
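  // lookup_interface_method_stub performs the itable lookups for both klasses:
  // it verifies that the receiver implements resolved_klass_reg (REFC),
  // branching to L_no_such_interface otherwise, and fetches the Method* for
  // itable_index from the itable section of holder_klass_reg (DEFC).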
  __ lookup_interface_method_stub(recv_klass_reg, holder_klass_reg, resolved_klass_reg, rmethod,
                                  temp_reg, temp_reg2, itable_index, L_no_such_interface);

  // Reduce "estimate" such that "padding" does not drop below 8.
  const ptrdiff_t estimate = 144;
  const ptrdiff_t codesize = __ pc() - start_pc;
  slop_delta = (int)(estimate - codesize);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);

#ifdef ASSERT
  if (DebugVtables) {
    Label L2;
    __ cbz(rmethod, L2);
    __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
    __ cbnz(rscratch1, L2);
    __ stop("compiler entrypoint is null");
    __ bind(L2);
  }
#endif // ASSERT

  // rmethod: Method*
  // j_rarg0: receiver
  address ame_addr = __ pc();
  __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
  __ br(rscratch1);

  __ bind(L_no_such_interface);
  // Handle IncompatibleClassChangeError in itable stubs.
  // More detailed error message.
  // We force resolving of the call site by jumping to the "handle
  // wrong method" stub, and so let the interpreter runtime do all the
  // dirty work.
  assert(SharedRuntime::get_handle_wrong_method_stub() != nullptr, "check initialization order");
  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);

  return s;
}

int VtableStub::pd_code_alignment() {
  // aarch64 cache line size is not an architected constant. We just align on 4 bytes (instruction size).
  const unsigned int icache_line_size = 4;
  return icache_line_size;
}
|
#include "code/vtableStubs.hpp"
#include "interp_masm_aarch64.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// machine-dependent part of VtableStubs: create VtableStub of correct size and
// initialize its code

#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
#endif

VtableStub* VtableStubs::create_vtable_stub(int vtable_index, bool caller_is_c1) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(true);
  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index, caller_is_c1);
  // Can be null if there is no free space in the code cache.
  if (s == nullptr) {
    return nullptr;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address start_pc;
  int slop_bytes = 0;
  int slop_delta = 0;

  // No variance was detected in vtable stub sizes. Setting index_dependent_slop == 0 will unveil any deviation from this observation.
  const int index_dependent_slop = 0;
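  // C1-compiled callers pass inline-type (value class) arguments by reference,
  // while C2 callers may pass them scalarized, so the stub has to dispatch
  // through the Method entry point matching the caller's calling convention.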
  ByteSize entry_offset = caller_is_c1 ? Method::from_compiled_inline_offset() : Method::from_compiled_inline_ro_offset();

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    __ lea(r16, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
    __ increment(Address(r16));
  }
#endif

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

  // get receiver klass
  address npe_addr = __ pc();
  __ load_klass(r16, j_rarg0);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // TODO: find upper bound for this debug code.
    start_pc = __ pc();

    // check offset vs vtable length
    __ ldrw(rscratch1, Address(r16, Klass::vtable_length_offset()));
    __ cmpw(rscratch1, vtable_index * vtableEntry::size());
    __ br(Assembler::GT, L);
    __ enter();
    __ mov(r2, vtable_index);

    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), j_rarg0, r2);
    const ptrdiff_t estimate = 256;
    const ptrdiff_t codesize = __ pc() - start_pc;
    slop_delta  = estimate - codesize;  // call_VM varies in length, depending on data
    slop_bytes += slop_delta;
    assert(slop_delta >= 0, "vtable #%d: Code size estimate (%d) for DebugVtables too small, required: %d", vtable_index, (int)estimate, (int)codesize);

    __ leave();
    __ bind(L);
  }
#endif // PRODUCT

  start_pc = __ pc();
  __ lookup_virtual_method(r16, vtable_index, rmethod);
  slop_delta = 8 - (int)(__ pc() - start_pc);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    __ cbz(rmethod, L);
    __ ldr(rscratch1, Address(rmethod, entry_offset));
    __ cbnz(rscratch1, L);
    __ stop("Vtable entry is null");
    __ bind(L);
  }
#endif // PRODUCT

  // r16: receiver klass
  // rmethod: Method*
  // j_rarg0: receiver
  address ame_addr = __ pc();
  __ ldr(rscratch1, Address(rmethod, entry_offset));
  __ br(rscratch1);

  masm->flush();
  slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets
  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, index_dependent_slop);

  return s;
}

VtableStub* VtableStubs::create_itable_stub(int itable_index, bool caller_is_c1) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(false);
  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index, caller_is_c1);
  // Can be null if there is no free space in the code cache.
  if (s == nullptr) {
    return nullptr;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address start_pc;
  int slop_bytes = 0;
  int slop_delta = 0;

  const int index_dependent_slop = (itable_index == 0) ? 4 :     // code size change with transition from 8-bit to 32-bit constant (@index == 16).
                                   (itable_index < 16) ? 3 : 0;  // index == 0 generates even shorter code.
  ByteSize entry_offset = caller_is_c1 ? Method::from_compiled_inline_offset() : Method::from_compiled_inline_ro_offset();

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    __ lea(r10, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
    __ increment(Address(r10));
  }
#endif

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

  // Entry arguments:
  //  rscratch2: CompiledICData
  //  j_rarg0: Receiver

  // This stub is called from compiled code which has no callee-saved registers,
  // so all registers except arguments are free at this point.
  const Register recv_klass_reg     = r10;
  const Register holder_klass_reg   = r16; // declaring interface klass (DEFC)
  const Register resolved_klass_reg = r14; // resolved interface klass (REFC)
  const Register temp_reg           = r11;
  const Register temp_reg2          = r15;
  const Register icdata_reg         = rscratch2;

  Label L_no_such_interface;

  __ ldr(resolved_klass_reg, Address(icdata_reg, CompiledICData::itable_refc_klass_offset()));
  __ ldr(holder_klass_reg,   Address(icdata_reg, CompiledICData::itable_defc_klass_offset()));

  start_pc = __ pc();

  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();
  __ load_klass(recv_klass_reg, j_rarg0);

  // Receiver subtype check against REFC.
  // Get selected method from declaring class and itable index
  __ lookup_interface_method_stub(recv_klass_reg, holder_klass_reg, resolved_klass_reg, rmethod,
                                  temp_reg, temp_reg2, itable_index, L_no_such_interface);

  // Reduce "estimate" such that "padding" does not drop below 8.
  const ptrdiff_t estimate = 144;
  const ptrdiff_t codesize = __ pc() - start_pc;
  slop_delta = (int)(estimate - codesize);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);

#ifdef ASSERT
  if (DebugVtables) {
    Label L2;
    __ cbz(rmethod, L2);
    __ ldr(rscratch1, Address(rmethod, entry_offset));
    __ cbnz(rscratch1, L2);
    __ stop("compiler entrypoint is null");
    __ bind(L2);
  }
#endif // ASSERT

  // rmethod: Method*
  // j_rarg0: receiver
  address ame_addr = __ pc();
  __ ldr(rscratch1, Address(rmethod, entry_offset));
  __ br(rscratch1);

  __ bind(L_no_such_interface);
  // Handle IncompatibleClassChangeError in itable stubs.
  // More detailed error message.
  // We force resolving of the call site by jumping to the "handle
  // wrong method" stub, and so let the interpreter runtime do all the
  // dirty work.
  assert(SharedRuntime::get_handle_wrong_method_stub() != nullptr, "check initialization order");
  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

  masm->flush();
  slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets
  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, index_dependent_slop);

  return s;
}

int VtableStub::pd_code_alignment() {
  // aarch64 cache line size is not an architected constant. We just align on 4 bytes (instruction size).
  const unsigned int icache_line_size = 4;
  return icache_line_size;
}
|