/*
 * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIR.hpp"
#include "code/vmreg.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"

//-----------------------------------------------------

// Convert method signature into an array of BasicTypes for the arguments
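// For example, an instance method with signature (JLjava/lang/String;[I)V produces
// { T_OBJECT /* receiver */, T_LONG, T_OBJECT, T_OBJECT }: the receiver is prepended
// and array types are flattened to T_OBJECT.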
BasicTypeArray* FrameMap::signature_type_array_for(const ciMethod* method) {
  ciSignature* sig = method->signature();
  BasicTypeList* sta = new BasicTypeList(method->arg_size());
  // add receiver, if any
  if (!method->is_static()) sta->append(T_OBJECT);
  // add remaining arguments
  for (int i = 0; i < sig->count(); i++) {
    ciType* type = sig->type_at(i);
    BasicType t = type->basic_type();
    if (t == T_ARRAY) {
      t = T_OBJECT;
    }
    sta->append(t);
  }
  // done
  return sta;
}


CallingConvention* FrameMap::java_calling_convention(const BasicTypeArray* signature, bool outgoing) {
  // Compute the size of the arguments first. The signature array that
  // SharedRuntime::java_calling_convention takes includes a T_VOID after each
  // double-word item (long/double), but our signatures do not.
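  // For example, { T_INT, T_LONG, T_DOUBLE } is expanded below into
  // { T_INT, T_LONG, T_VOID, T_DOUBLE, T_VOID } (sizeargs == 5).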
  int i;
  int sizeargs = 0;
  for (i = 0; i < signature->length(); i++) {
    sizeargs += type2size[signature->at(i)];
  }

  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
  VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
  int sig_index = 0;
  for (i = 0; i < sizeargs; i++, sig_index++) {
    sig_bt[i] = signature->at(sig_index);
    if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
      sig_bt[i + 1] = T_VOID;
      i++;
    }
  }

  intptr_t out_preserve = align_up(SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs), 2);
  LIR_OprList* args = new LIR_OprList(signature->length());
  for (i = 0; i < sizeargs;) {
    BasicType t = sig_bt[i];
    assert(t != T_VOID, "should be skipping these");
    LIR_Opr opr = map_to_opr(t, regs + i, outgoing);
    args->append(opr);
    if (opr->is_address()) {
      LIR_Address* addr = opr->as_address_ptr();
      assert(addr->disp() == (int)addr->disp(), "out of range value");
      out_preserve = MAX2(out_preserve, (intptr_t)addr->disp() / 4);
    }
    i += type2size[t];
  }
  assert(args->length() == signature->length(), "size mismatch");
  out_preserve += SharedRuntime::out_preserve_stack_slots();

  if (outgoing) {
    // update the space reserved for arguments.
    update_reserved_argument_area_size(out_preserve * BytesPerWord);
  }
  return new CallingConvention(args, out_preserve);
}


CallingConvention* FrameMap::c_calling_convention(const BasicTypeArray* signature) {
  // Compute the size of the arguments first. The signature array that
  // SharedRuntime::c_calling_convention takes includes a T_VOID after each
  // double-word item (long/double), but our signatures do not.
  int i;
  int sizeargs = 0;
  for (i = 0; i < signature->length(); i++) {
    sizeargs += type2size[signature->at(i)];
  }

  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
  VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
  int sig_index = 0;
  for (i = 0; i < sizeargs; i++, sig_index++) {
    sig_bt[i] = signature->at(sig_index);
    if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
      sig_bt[i + 1] = T_VOID;
      i++;
    }
  }

  intptr_t out_preserve = SharedRuntime::c_calling_convention(sig_bt, regs, sizeargs);
  LIR_OprList* args = new LIR_OprList(signature->length());
  for (i = 0; i < sizeargs;) {
    BasicType t = sig_bt[i];
    assert(t != T_VOID, "should be skipping these");

    // C calls are always outgoing
    bool outgoing = true;
    LIR_Opr opr = map_to_opr(t, regs + i, outgoing);
    // they might be of different types if for instance floating point
    // values are passed in cpu registers, but the sizes must match.
    assert(type2size[opr->type()] == type2size[t], "type mismatch");
    args->append(opr);
    if (opr->is_address()) {
      LIR_Address* addr = opr->as_address_ptr();
      out_preserve = MAX2(out_preserve, (intptr_t)addr->disp() / 4);
    }
    i += type2size[t];
  }
  assert(args->length() == signature->length(), "size mismatch");
  out_preserve += SharedRuntime::out_preserve_stack_slots();
  update_reserved_argument_area_size(out_preserve * BytesPerWord);
  return new CallingConvention(args, out_preserve);
}


//--------------------------------------------------------
//               FrameMap
//--------------------------------------------------------

bool      FrameMap::_init_done = false;
Register  FrameMap::_cpu_rnr2reg [FrameMap::nof_cpu_regs];
int       FrameMap::_cpu_reg2rnr [FrameMap::nof_cpu_regs];


FrameMap::FrameMap(ciMethod* method, int monitors, int reserved_argument_area_size) {
  assert(_init_done, "should already be completed");

  _framesize = -1;
  _num_spills = -1;

  assert(monitors >= 0, "not set");
  _num_monitors = monitors;
  assert(reserved_argument_area_size >= 0, "not set");
  // reserved_argument_area_size does not include stack space that a code stub may need; a stub
  // grows the area through update_reserved_argument_area_size(int size) when necessary (see the
  // constructor of CounterOverflowStub for an example).
  // The incoming value is in words; it is stored in bytes.
  _reserved_argument_area_size = reserved_argument_area_size * BytesPerWord;

  _argcount = method->arg_size();
  _argument_locations = new intArray(_argcount, _argcount, -1);
  _incoming_arguments = java_calling_convention(signature_type_array_for(method), false);
  _oop_map_arg_count = _incoming_arguments->reserved_stack_slots();

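  // For each incoming argument that was mapped to a stack address, record its SP-relative
  // displacement in _argument_locations and replace the operand with a stack slot indexed
  // by its Java argument number.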
  int java_index = 0;
  for (int i = 0; i < _incoming_arguments->length(); i++) {
    LIR_Opr opr = _incoming_arguments->at(i);
    if (opr->is_address()) {
      LIR_Address* address = opr->as_address_ptr();
      _argument_locations->at_put(java_index, address->disp());
      _incoming_arguments->args()->at_put(i, LIR_OprFact::stack(java_index, as_BasicType(as_ValueType(address->type()))));
    }
    java_index += type2size[opr->type()];
  }

}


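// Called once the number of spill slots is known: record the spill count, compute the final frame
// size (aligned to StackAlignmentInBytes), and rebase the stack-passed incoming arguments so that
// their offsets are relative to this frame's SP. Returns whether the resulting frame layout is
// expressible on the platform (see validate_frame()).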
bool FrameMap::finalize_frame(int nof_slots, bool needs_stack_repair) {
  assert(nof_slots >= 0, "must be non-negative");
  assert(_num_spills == -1, "can only be set once");
  _num_spills = nof_slots;
  assert(_framesize == -1, "should only be calculated once");
  _framesize =  align_up(in_bytes(sp_offset_for_monitor_base(0)) +
                         _num_monitors * (int)sizeof(BasicObjectLock) +
                         (int)sizeof(intptr_t) +                             // offset of deopt orig pc
                         (needs_stack_repair ? (int)sizeof(intptr_t) : 0) +  // stack increment value
                         frame_pad_in_bytes,
                         StackAlignmentInBytes) / 4;
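  // Note: _framesize above is measured in 4-byte (VMReg) stack slots, hence the division by 4.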
  int java_index = 0;
  for (int i = 0; i < _incoming_arguments->length(); i++) {
    LIR_Opr opr = _incoming_arguments->at(i);
    if (opr->is_stack()) {
      _argument_locations->at_put(java_index, in_bytes(framesize_in_bytes()) +
                                  _argument_locations->at(java_index));
    }
    java_index += type2size[opr->type()];
  }
  // make sure it's expressible on the platform
  return validate_frame();
}

VMReg FrameMap::sp_offset2vmreg(ByteSize offset) const {
  int offset_in_bytes = in_bytes(offset);
  assert(offset_in_bytes % 4 == 0, "must be multiple of 4 bytes");
  assert(offset_in_bytes / 4 < framesize() + oop_map_arg_count(), "out of range");
  return VMRegImpl::stack2reg(offset_in_bytes / 4);
}


bool FrameMap::location_for_sp_offset(ByteSize byte_offset_from_sp,
                                      Location::Type loc_type,
                                      Location* loc) const {
  int offset = in_bytes(byte_offset_from_sp);
  assert(offset >= 0, "incorrect offset");
  if (!Location::legal_offset_in_bytes(offset)) {
    return false;
  }
  Location tmp_loc = Location::new_stk_loc(loc_type, offset);
  *loc = tmp_loc;
  return true;
}


bool FrameMap::locations_for_slot  (int index, Location::Type loc_type,
                                     Location* loc, Location* second) const {
  ByteSize offset_from_sp = sp_offset_for_slot(index);
  if (!location_for_sp_offset(offset_from_sp, loc_type, loc)) {
    return false;
  }
  if (second != nullptr) {
    // two word item
    offset_from_sp = offset_from_sp + in_ByteSize(4);
    return location_for_sp_offset(offset_from_sp, loc_type, second);
  }
  return true;
}
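
// Note: for a two-word (long/double) item, the second half lives 4 bytes above the first,
// which is what the in_ByteSize(4) adjustment above encodes.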

//////////////////////
// Public accessors //
//////////////////////


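// As implied by the accessors below and by finalize_frame() above, the frame is laid out roughly
// as follows, from SP upwards:
//   [first_available_sp_in_frame (ABI area)] [reserved argument area]
//   [spill slots, double-aligned]            [monitors (BasicObjectLock), HeapWordSize-aligned]
//   [deopt orig pc, optional stack-repair word, frame padding], rounded up to StackAlignmentInBytes.
// Stack-passed incoming arguments live beyond framesize_in_bytes(), i.e. in the caller's frame.
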
ByteSize FrameMap::sp_offset_for_slot(const int index) const {
  if (index < argcount()) {
    int offset = _argument_locations->at(index);
    assert(offset != -1, "not a memory argument");
    assert(offset >= framesize() * 4, "argument inside of frame");
    return in_ByteSize(offset);
  }
  ByteSize offset = sp_offset_for_spill(index - argcount());
  assert(in_bytes(offset) < framesize() * 4, "spill outside of frame");
  return offset;
}


ByteSize FrameMap::sp_offset_for_double_slot(const int index) const {
  ByteSize offset = sp_offset_for_slot(index);
  if (index >= argcount()) {
    assert(in_bytes(offset) + 4 < framesize() * 4, "spill outside of frame");
  }
  return offset;
}


ByteSize FrameMap::sp_offset_for_spill(const int index) const {
  assert(index >= 0 && index < _num_spills, "out of range");
  int offset = align_up(first_available_sp_in_frame + _reserved_argument_area_size, (int)sizeof(double)) +
    index * spill_slot_size_in_bytes;
  return in_ByteSize(offset);
}

ByteSize FrameMap::sp_offset_for_monitor_base(const int index) const {
  int end_of_spills = align_up(first_available_sp_in_frame + _reserved_argument_area_size, (int)sizeof(double)) +
    _num_spills * spill_slot_size_in_bytes;
  int offset = align_up(end_of_spills, HeapWordSize) + index * (int)sizeof(BasicObjectLock);
  return in_ByteSize(offset);
}

ByteSize FrameMap::sp_offset_for_monitor_lock(int index) const {
  check_monitor_index(index);
  return sp_offset_for_monitor_base(index) + BasicObjectLock::lock_offset();
}

ByteSize FrameMap::sp_offset_for_monitor_object(int index) const {
  check_monitor_index(index);
  return sp_offset_for_monitor_base(index) + BasicObjectLock::obj_offset();
}


// For OopMaps, map a local variable or spill index to a VMReg.
// This is the offset from sp() in the frame of the slot for the index,
// skewed by SharedInfo::stack0 to indicate a stack location (vs. a register).
//
//         C ABI size +
//         framesize +     framesize +
//         stack0          stack0         stack0          0 <- VMReg->value()
//            |              |              | <registers> |
//  ..........|..............|..............|.............|
//    0 1 2 3 | <C ABI area> | 4 5 6 ...... |               <- local indices
//    ^                        ^          sp()
//    |                        |
//  arguments            non-argument locals


VMReg FrameMap::regname(LIR_Opr opr) const {
  if (opr->is_single_cpu()) {
    assert(!opr->is_virtual(), "should not see virtual registers here");
    return opr->as_register()->as_VMReg();
  } else if (opr->is_single_stack()) {
    return sp_offset2vmreg(sp_offset_for_slot(opr->single_stack_ix()));
  } else if (opr->is_address()) {
    LIR_Address* addr = opr->as_address_ptr();
    assert(addr->base() == stack_pointer(), "sp based addressing only");
    return sp_offset2vmreg(in_ByteSize(addr->index()->as_jint()));
  }
  ShouldNotReachHere();
  return VMRegImpl::Bad();
}




// ------------ extra spill slots ---------------