/*
 * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIR.hpp"
#include "code/vmreg.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"

//-----------------------------------------------------

// Convert method signature into an array of BasicTypes for the arguments
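// Purely illustrative example (hypothetical method): for a non-static method
// with Java signature (J[I)V the returned list would be
//   { T_OBJECT /* receiver */, T_LONG, T_OBJECT /* T_ARRAY folded to T_OBJECT */ }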
BasicTypeArray* FrameMap::signature_type_array_for(const ciMethod* method) {
  ciSignature* sig = method->signature();
  BasicTypeList* sta = new BasicTypeList(method->arg_size());
  // add receiver, if any
  if (!method->is_static()) sta->append(T_OBJECT);
  // add remaining arguments
  for (int i = 0; i < sig->count(); i++) {
    ciType* type = sig->type_at(i);
    BasicType t = type->basic_type();
    if (t == T_ARRAY) {
      t = T_OBJECT;
    }
    sta->append(t);
  }
  // done
  return sta;
}


CallingConvention* FrameMap::java_calling_convention(const BasicTypeArray* signature, bool outgoing) {
  // compute the size of the arguments first.  The signature array
  // that SharedRuntime::java_calling_convention takes includes a T_VOID
  // after each double-word item (long or double), but our signatures do not.
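  // Illustrative example: an incoming signature { T_OBJECT, T_LONG, T_INT }
  // is expanded below into sig_bt = { T_OBJECT, T_LONG, T_VOID, T_INT }
  // (sizeargs == 4) before being handed to SharedRuntime.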
  int i;
  int sizeargs = 0;
  for (i = 0; i < signature->length(); i++) {
    sizeargs += type2size[signature->at(i)];
  }

  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
  VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
  int sig_index = 0;
  for (i = 0; i < sizeargs; i++, sig_index++) {
    sig_bt[i] = signature->at(sig_index);
    if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
      sig_bt[i + 1] = T_VOID;
      i++;
    }
  }

  intptr_t out_preserve = align_up(SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs), 2);
  LIR_OprList* args = new LIR_OprList(signature->length());
  for (i = 0; i < sizeargs;) {
    BasicType t = sig_bt[i];
    assert(t != T_VOID, "should be skipping these");
    LIR_Opr opr = map_to_opr(t, regs + i, outgoing);
    args->append(opr);
    if (opr->is_address()) {
      LIR_Address* addr = opr->as_address_ptr();
      assert(addr->disp() == (int)addr->disp(), "out of range value");
      out_preserve = MAX2(out_preserve, (intptr_t)addr->disp() / 4);
    }
    i += type2size[t];
  }
  assert(args->length() == signature->length(), "size mismatch");
  out_preserve += SharedRuntime::out_preserve_stack_slots();

  if (outgoing) {
    // update the space reserved for arguments.
    update_reserved_argument_area_size(out_preserve * BytesPerWord);
  }
  return new CallingConvention(args, out_preserve);
}


CallingConvention* FrameMap::c_calling_convention(const BasicTypeArray* signature) {
  // compute the size of the arguments first.  The signature array
  // that SharedRuntime::c_calling_convention takes likewise includes a T_VOID
  // after each double-word item (long or double), but our signatures do not.
  int i;
  int sizeargs = 0;
  for (i = 0; i < signature->length(); i++) {
    sizeargs += type2size[signature->at(i)];
  }

  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
  VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
  int sig_index = 0;
  for (i = 0; i < sizeargs; i++, sig_index++) {
    sig_bt[i] = signature->at(sig_index);
    if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
      sig_bt[i + 1] = T_VOID;
      i++;
    }
  }

  intptr_t out_preserve = SharedRuntime::c_calling_convention(sig_bt, regs, sizeargs);
  LIR_OprList* args = new LIR_OprList(signature->length());
  for (i = 0; i < sizeargs;) {
    BasicType t = sig_bt[i];
    assert(t != T_VOID, "should be skipping these");

    // C calls are always outgoing
    bool outgoing = true;
    LIR_Opr opr = map_to_opr(t, regs + i, outgoing);
    // they might be of different types if for instance floating point
    // values are passed in cpu registers, but the sizes must match.
    assert(type2size[opr->type()] == type2size[t], "type mismatch");
    args->append(opr);
    if (opr->is_address()) {
      LIR_Address* addr = opr->as_address_ptr();
      out_preserve = MAX2(out_preserve, (intptr_t)addr->disp() / 4);
    }
    i += type2size[t];
  }
  assert(args->length() == signature->length(), "size mismatch");
  out_preserve += SharedRuntime::out_preserve_stack_slots();
  update_reserved_argument_area_size(out_preserve * BytesPerWord);
  return new CallingConvention(args, out_preserve);
}


//--------------------------------------------------------
// FrameMap
//--------------------------------------------------------

bool      FrameMap::_init_done = false;
Register  FrameMap::_cpu_rnr2reg [FrameMap::nof_cpu_regs];
int       FrameMap::_cpu_reg2rnr [FrameMap::nof_cpu_regs];


FrameMap::FrameMap(ciMethod* method, int monitors, int reserved_argument_area_size) {
  assert(_init_done, "should already be completed");

  _framesize = -1;
  _num_spills = -1;

  assert(monitors >= 0, "not set");
  _num_monitors = monitors;
  assert(reserved_argument_area_size >= 0, "not set");

  // reserved_argument_area_size does not include stack space that needs to be
  // occupied in the stub; the stub will update the value through
  // update_reserved_argument_area_size(int size) if needed.
  // See the constructor of class CounterOverflowStub for an example.
  _reserved_argument_area_size = reserved_argument_area_size * BytesPerWord;

  _argcount = method->arg_size();
  _argument_locations = new intArray(_argcount, _argcount, -1);
  _incoming_arguments = java_calling_convention(signature_type_array_for(method), false);
  _oop_map_arg_count = _incoming_arguments->reserved_stack_slots();

  int java_index = 0;
  for (int i = 0; i < _incoming_arguments->length(); i++) {
    LIR_Opr opr = _incoming_arguments->at(i);
    if (opr->is_address()) {
      LIR_Address* address = opr->as_address_ptr();
      _argument_locations->at_put(java_index, address->disp());
      _incoming_arguments->args()->at_put(i, LIR_OprFact::stack(java_index, as_BasicType(as_ValueType(address->type()))));
    }
    java_index += type2size[opr->type()];
  }
}


bool FrameMap::finalize_frame(int nof_slots, bool needs_stack_repair) {
  assert(nof_slots >= 0, "must be non-negative");
  assert(_num_spills == -1, "can only be set once");
  _num_spills = nof_slots;
  assert(_framesize == -1, "should only be calculated once");
  _framesize = align_up(in_bytes(sp_offset_for_monitor_base(0)) +
                        _num_monitors * (int)sizeof(BasicObjectLock) +
                        (int)sizeof(intptr_t) +                             // offset of deopt orig pc
                        (needs_stack_repair ? (int)sizeof(intptr_t) : 0) +  // stack increment value
                        frame_pad_in_bytes,
                        StackAlignmentInBytes) / 4;
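  // Note: _framesize is kept in 4-byte VMReg stack slots (compare framesize_in_bytes()
  // and sp_offset2vmreg() below), hence the division of the byte total by 4.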
  int java_index = 0;
  for (int i = 0; i < _incoming_arguments->length(); i++) {
    LIR_Opr opr = _incoming_arguments->at(i);
    if (opr->is_stack()) {
      _argument_locations->at_put(java_index, in_bytes(framesize_in_bytes()) +
                                  _argument_locations->at(java_index));
    }
    java_index += type2size[opr->type()];
  }
  // make sure it's expressible on the platform
  return validate_frame();
}

VMReg FrameMap::sp_offset2vmreg(ByteSize offset) const {
  int offset_in_bytes = in_bytes(offset);
  assert(offset_in_bytes % 4 == 0, "must be multiple of 4 bytes");
  assert(offset_in_bytes / 4 < framesize() + oop_map_arg_count(), "out of range");
  return VMRegImpl::stack2reg(offset_in_bytes / 4);
}


bool FrameMap::location_for_sp_offset(ByteSize byte_offset_from_sp,
                                      Location::Type loc_type,
                                      Location* loc) const {
  int offset = in_bytes(byte_offset_from_sp);
  assert(offset >= 0, "incorrect offset");
  if (!Location::legal_offset_in_bytes(offset)) {
    return false;
  }
  Location tmp_loc = Location::new_stk_loc(loc_type, offset);
  *loc = tmp_loc;
  return true;
}


bool FrameMap::locations_for_slot(int index, Location::Type loc_type,
                                  Location* loc, Location* second) const {
  ByteSize offset_from_sp = sp_offset_for_slot(index);
  if (!location_for_sp_offset(offset_from_sp, loc_type, loc)) {
    return false;
  }
  if (second != nullptr) {
    // two word item
    offset_from_sp = offset_from_sp + in_ByteSize(4);
    return location_for_sp_offset(offset_from_sp, loc_type, second);
  }
  return true;
}

//////////////////////
// Public accessors //
//////////////////////


ByteSize FrameMap::sp_offset_for_slot(const int index) const {
  if (index < argcount()) {
    int offset = _argument_locations->at(index);
    assert(offset != -1, "not a memory argument");
    assert(offset >= framesize() * 4, "argument inside of frame");
    return in_ByteSize(offset);
  }
  ByteSize offset = sp_offset_for_spill(index - argcount());
  assert(in_bytes(offset) < framesize() * 4, "spill outside of frame");
  return offset;
}


ByteSize FrameMap::sp_offset_for_double_slot(const int index) const {
  ByteSize offset = sp_offset_for_slot(index);
  if (index >= argcount()) {
    assert(in_bytes(offset) + 4 < framesize() * 4, "spill outside of frame");
  }
  return offset;
}


ByteSize FrameMap::sp_offset_for_spill(const int index) const {
  assert(index >= 0 && index < _num_spills, "out of range");
  int offset = align_up(first_available_sp_in_frame + _reserved_argument_area_size, (int)sizeof(double)) +
               index * spill_slot_size_in_bytes;
  return in_ByteSize(offset);
}

ByteSize FrameMap::sp_offset_for_monitor_base(const int index) const {
  int end_of_spills = align_up(first_available_sp_in_frame + _reserved_argument_area_size, (int)sizeof(double)) +
                      _num_spills * spill_slot_size_in_bytes;
  int offset = align_up(end_of_spills, HeapWordSize) + index * (int)sizeof(BasicObjectLock);
  return in_ByteSize(offset);
}
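// Frame layout implied by the two accessors above, growing away from sp()
// (illustrative summary):
//   [first_available_sp_in_frame][reserved argument area][spill slots][monitors]...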

ByteSize FrameMap::sp_offset_for_monitor_lock(int index) const {
  check_monitor_index(index);
  return sp_offset_for_monitor_base(index) + BasicObjectLock::lock_offset();
}

ByteSize FrameMap::sp_offset_for_monitor_object(int index) const {
  check_monitor_index(index);
  return sp_offset_for_monitor_base(index) + BasicObjectLock::obj_offset();
}


// For OopMaps, map a local variable or spill index to a VMReg.
// This is the offset from sp() in the frame of the slot for the index,
// skewed by SharedInfo::stack0 to indicate a stack location (vs. a register.)
//
//          C ABI size +
//          framesize +     framesize +
//          stack0          stack0          stack0           0  <- VMReg->value()
//             |               |               | <registers> |
//  ...........|...............|...............|.............|
//    0 1 2 3  | <C ABI area>  |  4 5 6 ...... |                <- local indices
//    ^                           ^           sp()
//    |                           |
//  arguments             non-argument locals


VMReg FrameMap::regname(LIR_Opr opr) const {
  if (opr->is_single_cpu()) {
    assert(!opr->is_virtual(), "should not see virtual registers here");
    return opr->as_register()->as_VMReg();
  } else if (opr->is_single_stack()) {
    return sp_offset2vmreg(sp_offset_for_slot(opr->single_stack_ix()));
  } else if (opr->is_address()) {
    LIR_Address* addr = opr->as_address_ptr();
    assert(addr->base() == stack_pointer(), "sp based addressing only");
    return sp_offset2vmreg(in_ByteSize(addr->index()->as_jint()));
  }
  ShouldNotReachHere();
  return VMRegImpl::Bad();
}




// ------------ extra spill slots ---------------