/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIR.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef TARGET_ARCH_x86
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "vmreg_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "vmreg_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "vmreg_ppc.inline.hpp"
#endif
#ifdef TARGET_ARCH_aarch32
# include "vmreg_aarch32.inline.hpp"
#endif



//-----------------------------------------------------

// Convert method signature into an array of BasicTypes for the arguments
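// (For illustration: a non-static method with Java signature (JLjava/lang/String;[I)V
//  yields the list { T_OBJECT /* receiver */, T_LONG, T_OBJECT, T_OBJECT }, since
//  array arguments are flattened to T_OBJECT here.)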
BasicTypeArray* FrameMap::signature_type_array_for(const ciMethod* method) {
  ciSignature* sig = method->signature();
  BasicTypeList* sta = new BasicTypeList(method->arg_size());
  // add receiver, if any
  if (!method->is_static()) sta->append(T_OBJECT);
  // add remaining arguments
  for (int i = 0; i < sig->count(); i++) {
    ciType* type = sig->type_at(i);
    BasicType t = type->basic_type();
    if (t == T_ARRAY) {
      t = T_OBJECT;
    }
    sta->append(t);
  }
  // done
  return sta;
}


CallingConvention* FrameMap::java_calling_convention(const BasicTypeArray* signature, bool outgoing) {
  // Compute the size of the arguments first.  The signature array
  // that SharedRuntime::java_calling_convention takes includes a T_VOID after
  // double-word items but our signatures do not.
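  // (For example, an input signature {T_LONG, T_INT, T_DOUBLE} is expanded below
  //  into sig_bt = {T_LONG, T_VOID, T_INT, T_DOUBLE, T_VOID} before it is handed
  //  to SharedRuntime::java_calling_convention().)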
  int i;
  int sizeargs = 0;
  for (i = 0; i < signature->length(); i++) {
    sizeargs += type2size[signature->at(i)];
  }

  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
  VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
  int sig_index = 0;
  for (i = 0; i < sizeargs; i++, sig_index++) {
    sig_bt[i] = signature->at(sig_index);
    if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
      sig_bt[i + 1] = T_VOID;
      i++;
    }
  }

  intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, outgoing);
  LIR_OprList* args = new LIR_OprList(signature->length());
  for (i = 0; i < sizeargs;) {
    BasicType t = sig_bt[i];
    assert(t != T_VOID, "should be skipping these");
    LIR_Opr opr = map_to_opr(t, regs + i, outgoing);
    args->append(opr);
    if (opr->is_address()) {
      LIR_Address* addr = opr->as_address_ptr();
      assert(addr->disp() == (int)addr->disp(), "out of range value");
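      // Track the deepest outgoing stack slot used by a stack-passed argument:
      // disp() is a byte offset from SP (adjusted for STACK_BIAS on platforms
      // that use one), and dividing by 4 converts it into VMReg stack-slot units.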
      out_preserve = MAX2(out_preserve, (intptr_t)(addr->disp() - STACK_BIAS) / 4);
    }
    i += type2size[t];
  }
  assert(args->length() == signature->length(), "size mismatch");
  out_preserve += SharedRuntime::out_preserve_stack_slots();

  if (outgoing) {
    // update the space reserved for arguments.
    update_reserved_argument_area_size(out_preserve * BytesPerWord);
  }
  return new CallingConvention(args, out_preserve);
}


CallingConvention* FrameMap::c_calling_convention(const BasicTypeArray* signature) {
  // Compute the size of the arguments first.  The signature array
  // that SharedRuntime::c_calling_convention takes includes a T_VOID after
  // double-word items but our signatures do not.
  int i;
  int sizeargs = 0;
  for (i = 0; i < signature->length(); i++) {
    sizeargs += type2size[signature->at(i)];
  }

  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
  VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
  int sig_index = 0;
  for (i = 0; i < sizeargs; i++, sig_index++) {
    sig_bt[i] = signature->at(sig_index);
    if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
      sig_bt[i + 1] = T_VOID;
      i++;
    }
  }

  intptr_t out_preserve = SharedRuntime::c_calling_convention(sig_bt, regs, NULL, sizeargs);
  LIR_OprList* args = new LIR_OprList(signature->length());
  for (i = 0; i < sizeargs;) {
    BasicType t = sig_bt[i];
    assert(t != T_VOID, "should be skipping these");

    // C calls are always outgoing
    bool outgoing = true;
    LIR_Opr opr = map_to_opr(t, regs + i, outgoing);
    // They might be of different types if, for instance, floating point
    // values are passed in cpu registers, but the sizes must match.
    assert(type2size[opr->type()] == type2size[t], "type mismatch");
    args->append(opr);
    if (opr->is_address()) {
      LIR_Address* addr = opr->as_address_ptr();
      out_preserve = MAX2(out_preserve, (intptr_t)(addr->disp() - STACK_BIAS) / 4);
    }
    i += type2size[t];
  }
  assert(args->length() == signature->length(), "size mismatch");
  out_preserve += SharedRuntime::out_preserve_stack_slots();
  update_reserved_argument_area_size(out_preserve * BytesPerWord);
  return new CallingConvention(args, out_preserve);
}


//--------------------------------------------------------
//               FrameMap
//--------------------------------------------------------

bool      FrameMap::_init_done = false;
Register  FrameMap::_cpu_rnr2reg [FrameMap::nof_cpu_regs];
int       FrameMap::_cpu_reg2rnr [FrameMap::nof_cpu_regs];


FrameMap::FrameMap(ciMethod* method, int monitors, int reserved_argument_area_size) {
  assert(_init_done, "should already be completed");

  _framesize = -1;
  _num_spills = -1;

  assert(monitors >= 0, "not set");
  _num_monitors = monitors;
  assert(reserved_argument_area_size >= 0, "not set");
  _reserved_argument_area_size = MAX2(4, reserved_argument_area_size) * BytesPerWord;
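  // (Note: a minimum of 4 words is reserved for the outgoing argument area here,
  //  even if the requested size is smaller.)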

  _argcount = method->arg_size();
  _argument_locations = new intArray(_argcount, -1);
  _incoming_arguments = java_calling_convention(signature_type_array_for(method), false);
  _oop_map_arg_count = _incoming_arguments->reserved_stack_slots();
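  // _oop_map_arg_count is, roughly, the number of stack slots taken up by incoming
  // arguments passed on the stack; sp_offset2vmreg() below accepts offsets up to
  // framesize() + oop_map_arg_count().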

  int java_index = 0;
  for (int i = 0; i < _incoming_arguments->length(); i++) {
    LIR_Opr opr = _incoming_arguments->at(i);
    if (opr->is_address()) {
      LIR_Address* address = opr->as_address_ptr();
      _argument_locations->at_put(java_index, address->disp() - STACK_BIAS);
      _incoming_arguments->args()->at_put(i, LIR_OprFact::stack(java_index, as_BasicType(as_ValueType(address->type()))));
    }
    java_index += type2size[opr->type()];
  }

}


bool FrameMap::finalize_frame(int nof_slots) {
  assert(nof_slots >= 0, "must be non-negative");
  assert(_num_spills == -1, "can only be set once");
  _num_spills = nof_slots;
  assert(_framesize == -1, "should only be calculated once");
  _framesize =  round_to(in_bytes(sp_offset_for_monitor_base(0)) +
                         _num_monitors * sizeof(BasicObjectLock) +
                         sizeof(intptr_t) +                        // offset of deopt orig pc
                         frame_pad_in_bytes,
                         StackAlignmentInBytes) / 4;
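  // _framesize is thus measured in 4-byte stack slots: spill area, monitor area,
  // the deopt original-pc word and frame padding, rounded up to StackAlignmentInBytes
  // and then divided by 4.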
  int java_index = 0;
  for (int i = 0; i < _incoming_arguments->length(); i++) {
    LIR_Opr opr = _incoming_arguments->at(i);
    if (opr->is_stack()) {
      _argument_locations->at_put(java_index, in_bytes(framesize_in_bytes()) +
                                  _argument_locations->at(java_index));
    }
    java_index += type2size[opr->type()];
  }
  // make sure it's expressible on the platform
  return validate_frame();
}

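// Map a byte offset from SP to a VMReg stack slot (one slot is 4 bytes); e.g. an
// offset of 8 bytes becomes VMRegImpl::stack2reg(2).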
VMReg FrameMap::sp_offset2vmreg(ByteSize offset) const {
  int offset_in_bytes = in_bytes(offset);
  assert(offset_in_bytes % 4 == 0, "must be multiple of 4 bytes");
  assert(offset_in_bytes / 4 < framesize() + oop_map_arg_count(), "out of range");
  return VMRegImpl::stack2reg(offset_in_bytes / 4);
}


bool FrameMap::location_for_sp_offset(ByteSize byte_offset_from_sp,
                                      Location::Type loc_type,
                                      Location* loc) const {
  int offset = in_bytes(byte_offset_from_sp);
  assert(offset >= 0, "incorrect offset");
  if (!Location::legal_offset_in_bytes(offset)) {
    return false;
  }
  Location tmp_loc = Location::new_stk_loc(loc_type, offset);
  *loc = tmp_loc;
  return true;
}


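// Describe the stack slot(s) for a frame slot index (argument or spill) as
// debug-info Locations. For a two-word value (long/double) the caller passes a
// non-NULL 'second', which receives the slot 4 bytes above the first.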
bool FrameMap::locations_for_slot  (int index, Location::Type loc_type,
                                     Location* loc, Location* second) const {
  ByteSize offset_from_sp = sp_offset_for_slot(index);
  if (!location_for_sp_offset(offset_from_sp, loc_type, loc)) {
    return false;
  }
  if (second != NULL) {
    // two word item
    offset_from_sp = offset_from_sp + in_ByteSize(4);
    return location_for_sp_offset(offset_from_sp, loc_type, second);
  }
  return true;
}

//////////////////////
// Public accessors //
//////////////////////


ByteSize FrameMap::sp_offset_for_slot(const int index) const {
  if (index < argcount()) {
    int offset = _argument_locations->at(index);
    assert(offset != -1, "not a memory argument");
    assert(offset >= framesize() * 4, "argument inside of frame");
    return in_ByteSize(offset);
  }
  ByteSize offset = sp_offset_for_spill(index - argcount());
  assert(in_bytes(offset) < framesize() * 4, "spill outside of frame");
  return offset;
}


ByteSize FrameMap::sp_offset_for_double_slot(const int index) const {
  ByteSize offset = sp_offset_for_slot(index);
  if (index >= argcount()) {
    assert(in_bytes(offset) + 4 < framesize() * 4, "spill outside of frame");
  }
  return offset;
}


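// Spill slots are laid out after first_available_sp_in_frame and the reserved
// outgoing-argument area; the start of the spill area is rounded up to
// sizeof(double) alignment, and each slot is spill_slot_size_in_bytes wide.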
ByteSize FrameMap::sp_offset_for_spill(const int index) const {
  assert(index >= 0 && index < _num_spills, "out of range");
  int offset = round_to(first_available_sp_in_frame + _reserved_argument_area_size, sizeof(double)) +
    index * spill_slot_size_in_bytes;
  return in_ByteSize(offset);
}

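// The monitor (BasicObjectLock) area follows the spill area, rounded up to
// HeapWordSize alignment; each monitor occupies sizeof(BasicObjectLock) bytes.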
ByteSize FrameMap::sp_offset_for_monitor_base(const int index) const {
  int end_of_spills = round_to(first_available_sp_in_frame + _reserved_argument_area_size, sizeof(double)) +
    _num_spills * spill_slot_size_in_bytes;
  int offset = (int) round_to(end_of_spills, HeapWordSize) + index * sizeof(BasicObjectLock);
  return in_ByteSize(offset);
}

ByteSize FrameMap::sp_offset_for_monitor_lock(int index) const {
  check_monitor_index(index);
  return sp_offset_for_monitor_base(index) + in_ByteSize(BasicObjectLock::lock_offset_in_bytes());
}

ByteSize FrameMap::sp_offset_for_monitor_object(int index) const {
  check_monitor_index(index);
  return sp_offset_for_monitor_base(index) + in_ByteSize(BasicObjectLock::obj_offset_in_bytes());
}


// For OopMaps, map a local variable or spill index to a VMReg.
// This is the offset from sp() in the frame of the slot for the index,
// skewed by SharedInfo::stack0 to indicate a stack location (vs. a register).
//
//         C ABI size +
//         framesize +     framesize +
//         stack0          stack0         stack0          0 <- VMReg->value()
//            |              |              | <registers> |
//  ..........|..............|..............|.............|
//    0 1 2 3 | <C ABI area> | 4 5 6 ...... |               <- local indices
//    ^                        ^          sp()
//    |                        |
//  arguments            non-argument locals


VMReg FrameMap::regname(LIR_Opr opr) const {
  if (opr->is_single_cpu()) {
    assert(!opr->is_virtual(), "should not see virtual registers here");
    return opr->as_register()->as_VMReg();
  } else if (opr->is_single_stack()) {
    return sp_offset2vmreg(sp_offset_for_slot(opr->single_stack_ix()));
  } else if (opr->is_address()) {
    LIR_Address* addr = opr->as_address_ptr();
    assert(addr->base() == stack_pointer(), "sp based addressing only");
    return sp_offset2vmreg(in_ByteSize(addr->index()->as_jint()));
  }
  ShouldNotReachHere();
  return VMRegImpl::Bad();
}




// ------------ extra spill slots ---------------