
src/hotspot/share/prims/foreignGlobals.cpp

Old version (before the change):

153         VMStorage reg = _input_regs.at(src_pos++);
154         out_regs[i] = reg;
155         if (reg.is_stack())
156           max_stack_offset = MAX2(max_stack_offset, reg.offset() + reg.stack_size());
157         break;
158       }
159       case T_VOID: // Halves of longs and doubles
160         assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
161         out_regs[i] = VMStorage::invalid();
162         break;
163       default:
164         ShouldNotReachHere();
165         break;
166     }
167   }
168   return align_up(max_stack_offset, 8);
169 }
170 
171 int JavaCallingConvention::calling_convention(const BasicType* sig_bt, VMStorage* regs, int num_args) const {
172   VMRegPair* vm_regs = NEW_RESOURCE_ARRAY(VMRegPair, num_args);
173   int slots = SharedRuntime::java_calling_convention(sig_bt, vm_regs, num_args);
174   for (int i = 0; i < num_args; i++) {
175     VMRegPair pair = vm_regs[i];
176     // Note: we ignore the second register here. The signature should consist of register-sized values, so there should be
177     // no need for multi-register pairs.
178     //assert(!pair.first()->is_valid() || pair.is_single_reg(), "must be: %s");
179     regs[i] = as_VMStorage(pair.first(), sig_bt[i]);
180   }
181   return slots << LogBytesPerInt;
182 }
183 
184 class ComputeMoveOrder: public StackObj {
185   class MoveOperation;
186 
187   // segment_mask_or_size is not taken into account since
188   // VMStorages that differ only in mask or size can still
189   // conflict
190   static inline unsigned hash(const VMStorage& vms) {
191     return static_cast<unsigned int>(vms.type()) ^ vms.index_or_offset();
192   }
193   static inline bool equals(const VMStorage& a, const VMStorage& b) {

New version (after the change; only line 173 differs):

153         VMStorage reg = _input_regs.at(src_pos++);
154         out_regs[i] = reg;
155         if (reg.is_stack())
156           max_stack_offset = MAX2(max_stack_offset, reg.offset() + reg.stack_size());
157         break;
158       }
159       case T_VOID: // Halves of longs and doubles
160         assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
161         out_regs[i] = VMStorage::invalid();
162         break;
163       default:
164         ShouldNotReachHere();
165         break;
166     }
167   }
168   return align_up(max_stack_offset, 8);
169 }
170 
171 int JavaCallingConvention::calling_convention(const BasicType* sig_bt, VMStorage* regs, int num_args) const {
172   VMRegPair* vm_regs = NEW_RESOURCE_ARRAY(VMRegPair, num_args);
173   int slots = align_up(SharedRuntime::java_calling_convention(sig_bt, vm_regs, num_args), 2);
174   for (int i = 0; i < num_args; i++) {
175     VMRegPair pair = vm_regs[i];
176     // Note: we ignore the second register here. The signature should consist of register-sized values, so there should be
177     // no need for multi-register pairs.
178     //assert(!pair.first()->is_valid() || pair.is_single_reg(), "must be: %s");
179     regs[i] = as_VMStorage(pair.first(), sig_bt[i]);
180   }
181   return slots << LogBytesPerInt;
182 }
183 
184 class ComputeMoveOrder: public StackObj {
185   class MoveOperation;
186 
187   // segment_mask_or_size is not taken into account since
188   // VMStorages that differ only in mask or size can still
189   // conflict
190   static inline unsigned hash(const VMStorage& vms) {
191     return static_cast<unsigned int>(vms.type()) ^ vms.index_or_offset();
192   }
193   static inline bool equals(const VMStorage& a, const VMStorage& b) {
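
The change at line 173 rounds the slot count returned by SharedRuntime::java_calling_convention up to an even number before it is converted to bytes, so the size returned by this function becomes a multiple of 8 rather than just 4. A minimal standalone sketch of that arithmetic, assuming align_up rounds up to a power-of-two alignment and LogBytesPerInt is 2 (4-byte slots) as in HotSpot; the slot counts below are made-up examples:

    #include <cstdio>

    // Stand-in for HotSpot's align_up; alignment must be a power of two.
    static int align_up(int value, int alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }
    static const int LogBytesPerInt = 2; // one Java slot is 4 bytes

    int main() {
      const int sample_slot_counts[] = {4, 5, 7}; // hypothetical java_calling_convention results
      for (int slots : sample_slot_counts) {
        int old_bytes = slots << LogBytesPerInt;              // old line 173: multiple of 4
        int new_bytes = align_up(slots, 2) << LogBytesPerInt; // new line 173: multiple of 8
        printf("slots=%d old=%d bytes new=%d bytes\n", slots, old_bytes, new_bytes);
      }
      return 0;
    }

For example, 5 slots used to yield 20 bytes and now yields 24, which keeps the returned size 8-byte aligned.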
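Background, not part of this change: in HotSpot signature arrays, a long or double contributes two BasicType entries, with the second entry set to T_VOID. A hypothetical (long, int) argument list therefore arrives as { T_LONG, T_VOID, T_INT }, and the T_VOID entry is the "half" that the switch near the top of the hunk maps to VMStorage::invalid().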