/*
 * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "foreignGlobals.hpp"
#include "classfile/javaClasses.hpp"
#include "memory/resourceArea.hpp"
#include "prims/foreignGlobals.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "utilities/resourceHash.hpp"

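// StubLocations maps a small set of stub-local location indices to concrete VMStorage
// values. It is used to resolve PLACEHOLDER storages to their actual locations and to
// record FRAME_DATA offsets into the stub frame (see set_frame_data/data_offset).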
StubLocations::StubLocations() {
  for (uint32_t i = 0; i < LOCATION_LIMIT; i++) {
    _locs[i] = VMStorage::invalid();
  }
}

void StubLocations::set(uint32_t loc, VMStorage storage) {
  assert(loc < LOCATION_LIMIT, "oob");
  _locs[loc] = storage;
}

void StubLocations::set_frame_data(uint32_t loc, int offset) {
  set(loc, VMStorage(StorageType::FRAME_DATA, 8, offset));
}

VMStorage StubLocations::get(uint32_t loc) const {
  assert(loc < LOCATION_LIMIT, "oob");
  VMStorage storage = _locs[loc];
  assert(storage.is_valid(), "not set");
  return storage;
}

VMStorage StubLocations::get(VMStorage placeholder) const {
  assert(placeholder.type() == StorageType::PLACEHOLDER, "must be");
  return get(placeholder.index());
}

int StubLocations::data_offset(uint32_t loc) const {
  VMStorage storage = get(loc);
  assert(storage.type() == StorageType::FRAME_DATA, "must be");
  return storage.offset();
}

#define FOREIGN_ABI "jdk/internal/foreign/abi/"

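// Converts the Java-side call-convention object referenced by 'jconv' (accessed through
// the jdk_internal_foreign_abi_CallConv accessors) into the VM's CallRegs representation,
// parsing each element of its argRegs/retRegs arrays with parse_vmstorage().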
const CallRegs ForeignGlobals::parse_call_regs(jobject jconv) {
  oop conv_oop = JNIHandles::resolve_non_null(jconv);
  objArrayOop arg_regs_oop = jdk_internal_foreign_abi_CallConv::argRegs(conv_oop);
  objArrayOop ret_regs_oop = jdk_internal_foreign_abi_CallConv::retRegs(conv_oop);
  int num_args = arg_regs_oop->length();
  int num_rets = ret_regs_oop->length();
  CallRegs result(num_args, num_rets);

  for (int i = 0; i < num_args; i++) {
    result._arg_regs.push(parse_vmstorage(arg_regs_oop->obj_at(i)));
  }

  for (int i = 0; i < num_rets; i++) {
    result._ret_regs.push(parse_vmstorage(ret_regs_oop->obj_at(i)));
  }

  return result;
}

VMStorage ForeignGlobals::parse_vmstorage(oop storage) {
  jbyte type = jdk_internal_foreign_abi_VMStorage::type(storage);
  jshort segment_mask_or_size = jdk_internal_foreign_abi_VMStorage::segment_mask_or_size(storage);
  jint index_or_offset = jdk_internal_foreign_abi_VMStorage::index_or_offset(storage);

  return VMStorage(static_cast<StorageType>(type), segment_mask_or_size, index_or_offset);
}

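// RegSpiller: compute_spill_area() returns the number of bytes needed to spill all
// registers in 'regs' (summing the platform-specific pd_reg_size for each entry);
// generate() emits either the stores (spill == true) or the reloads (spill == false)
// for those registers at successive offsets starting at rsp_offset.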
int RegSpiller::compute_spill_area(const GrowableArray<VMStorage>& regs) {
  int result_size = 0;
  for (int i = 0; i < regs.length(); i++) {
    result_size += pd_reg_size(regs.at(i));
  }
  return result_size;
}

void RegSpiller::generate(MacroAssembler* masm, int rsp_offset, bool spill) const {
  assert(rsp_offset != -1, "rsp_offset should be set");
  int offset = rsp_offset;
  for (int i = 0; i < _regs.length(); i++) {
    VMStorage reg = _regs.at(i);
    if (spill) {
      pd_store_reg(masm, offset, reg);
    } else {
      pd_load_reg(masm, offset, reg);
    }
    offset += pd_reg_size(reg);
  }
}

void ArgumentShuffle::print_on(outputStream* os) const {
  os->print_cr("Argument shuffle {");
  for (int i = 0; i < _moves.length(); i++) {
    Move move = _moves.at(i);
    VMStorage from_reg = move.from;
    VMStorage to_reg   = move.to;

    os->print("Move from ");
    from_reg.print_on(os);
    os->print(" to ");
    to_reg.print_on(os);
    os->print_cr("");
  }
  os->print_cr("Stack argument bytes: %d", _out_arg_bytes);
  os->print_cr("}");
}

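// Assigns a VMStorage from _input_regs to each argument in the signature, writing the
// result into out_regs. T_VOID entries (the second halves of longs/doubles) receive an
// invalid storage. Returns the size in bytes of the stack arguments, rounded up to an
// 8-byte boundary.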
int NativeCallingConvention::calling_convention(const BasicType* sig_bt, VMStorage* out_regs, int num_args) const {
  int src_pos = 0;
  uint32_t max_stack_offset = 0;
  for (int i = 0; i < num_args; i++) {
    switch (sig_bt[i]) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
      case T_FLOAT: {
        VMStorage reg = _input_regs.at(src_pos++);
        out_regs[i] = reg;
        if (reg.is_stack())
          max_stack_offset = MAX2(max_stack_offset, reg.offset() + reg.stack_size());
        break;
      }
      case T_LONG:
      case T_DOUBLE: {
        assert((i + 1) < num_args && sig_bt[i + 1] == T_VOID, "expecting half");
        VMStorage reg = _input_regs.at(src_pos++);
        out_regs[i] = reg;
        if (reg.is_stack())
          max_stack_offset = MAX2(max_stack_offset, reg.offset() + reg.stack_size());
        break;
      }
      case T_VOID: // Halves of longs and doubles
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        out_regs[i] = VMStorage::invalid();
        break;
      default:
        ShouldNotReachHere();
        break;
    }
  }
  return align_up(max_stack_offset, 8);
}

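// Delegates to SharedRuntime::java_calling_convention and narrows each VMRegPair to a
// single VMStorage. Returns the outgoing Java stack-argument size in bytes (stack slots
// are 4 bytes each, hence the shift by LogBytesPerInt).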
int JavaCallingConvention::calling_convention(const BasicType* sig_bt, VMStorage* regs, int num_args) const {
  VMRegPair* vm_regs = NEW_RESOURCE_ARRAY(VMRegPair, num_args);
  int slots = align_up(SharedRuntime::java_calling_convention(sig_bt, vm_regs, num_args), 2);
  for (int i = 0; i < num_args; i++) {
    VMRegPair pair = vm_regs[i];
    // Note: we ignore the second register of the pair here. The signature should consist
    // of register-sized values, so there should be no need for multi-register pairs.
    //assert(!pair.first()->is_valid() || pair.is_single_reg(), "must be: %s");
    regs[i] = as_VMStorage(pair.first(), sig_bt[i]);
  }
  return slots << LogBytesPerInt;
}

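// ComputeMoveOrder schedules the argument moves (in_regs[i] -> out_regs[i]) so that no
// source register is overwritten before it has been read. Moves that target the stack
// are emitted first; the remaining register-to-register moves are chained into lists
// via the KillerTable and emitted in dependency order, with any cycle broken through
// tmp_vmreg.
//
// Illustrative example (register names are hypothetical): for the two moves
//   rdi -> rsi, rsi -> rdi
// the cycle is broken via the temp register, yielding e.g. the order
//   rsi -> tmp, rdi -> rsi, tmp -> rdi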
class ComputeMoveOrder: public StackObj {
  class MoveOperation;

  // segment_mask_or_size is not taken into account since
  // VMStorages that differ only in mask or size can still
  // conflict
  static inline unsigned hash(const VMStorage& vms) {
    return static_cast<unsigned int>(vms.type()) ^ vms.index_or_offset();
  }
  static inline bool equals(const VMStorage& a, const VMStorage& b) {
    return a.type() == b.type() && a.index_or_offset() == b.index_or_offset();
  }

  using KillerTable = ResourceHashtable<
    VMStorage, MoveOperation*,
    32, // doesn't need to be big. don't have that many argument registers (in known ABIs)
    AnyObj::RESOURCE_AREA,
    mtInternal,
    ComputeMoveOrder::hash,
    ComputeMoveOrder::equals
    >;

  class MoveOperation: public ResourceObj {
    friend class ComputeMoveOrder;
   private:
    VMStorage       _src;
    VMStorage       _dst;
    bool            _processed;
    MoveOperation*  _next;
    MoveOperation*  _prev;

   public:
    MoveOperation(VMStorage src, VMStorage dst):
      _src(src), _dst(dst), _processed(false), _next(nullptr), _prev(nullptr) {}

    const VMStorage& src() const { return _src; }
    const VMStorage& dst() const { return _dst; }
    MoveOperation* next()  const { return _next; }
    MoveOperation* prev()  const { return _prev; }
    void set_processed()         { _processed = true; }
    bool is_processed()    const { return _processed; }
    // Break the cycle containing this move by routing its value through
    // temp_register: insert a new store (temp -> original dst) and redirect
    // this move to store into the temp instead.
    void break_cycle(VMStorage temp_register) {
      // create a new store following the last store
      // to move from the temp_register to the original
      MoveOperation* new_store = new MoveOperation(temp_register, _dst);

      // break the cycle of links and insert new_store at the end
      // break the reverse link.
      MoveOperation* p = prev();
      assert(p->next() == this, "must be");
      _prev = nullptr;
      p->_next = new_store;
      new_store->_prev = p;

      // change the original store to save its value in the temp.
      _dst = temp_register;
    }

    void link(KillerTable& killer) {
      // link this store in front of the store that it depends on
      MoveOperation** n = killer.get(_src);
      if (n != nullptr) {
        MoveOperation* src_killer = *n;
        assert(_next == nullptr && src_killer->_prev == nullptr, "shouldn't have been set yet");
        _next = src_killer;
        src_killer->_prev = this;
      }
    }

    Move as_move() {
      return {_src, _dst};
    }
  };

 private:
  int _total_in_args;
  const VMStorage* _in_regs;
  int _total_out_args;
  const VMStorage* _out_regs;
  const BasicType* _in_sig_bt;
  VMStorage _tmp_vmreg;
  GrowableArray<MoveOperation*> _edges;
  GrowableArray<Move> _moves;

 public:
  ComputeMoveOrder(int total_in_args, const VMStorage* in_regs, int total_out_args, VMStorage* out_regs,
                   const BasicType* in_sig_bt, VMStorage tmp_vmreg) :
      _total_in_args(total_in_args),
      _in_regs(in_regs),
      _total_out_args(total_out_args),
      _out_regs(out_regs),
      _in_sig_bt(in_sig_bt),
      _tmp_vmreg(tmp_vmreg),
      _edges(total_in_args),
      _moves(total_in_args) {
  }

  void compute() {
    assert(_total_out_args >= _total_in_args, "can only add prefix args");
    // Note that total_out_args can be greater than total_in_args in the case of upcalls.
    // There will be a leading MH receiver arg in the out args in that case.
    //
    // Leading args in the out args will be ignored below because we iterate from the end of
    // the register arrays until !(in_idx >= 0), and total_in_args is smaller.
    //
    // Stub code adds a move for the receiver to j_rarg0 (and potentially other prefix args) manually.
    for (int in_idx = _total_in_args - 1, out_idx = _total_out_args - 1; in_idx >= 0; in_idx--, out_idx--) {
      BasicType bt = _in_sig_bt[in_idx];
      assert(bt != T_ARRAY, "array not expected");
      VMStorage in_reg = _in_regs[in_idx];
      VMStorage out_reg = _out_regs[out_idx];

      if (out_reg.is_stack() || out_reg.is_frame_data()) {
        // Move operations where the dest is the stack can all be
        // scheduled first since they can't interfere with the other moves.
        // The input and output stack spaces are distinct from each other.
        Move move{in_reg, out_reg};
        _moves.push(move);
      } else if (in_reg == out_reg
                 || bt == T_VOID) {
        // 1. Can skip non-stack identity moves.
        //
        // 2. Upper half of long or double (T_VOID).
        //    Don't need to do anything.
        continue;
      } else {
        _edges.append(new MoveOperation(in_reg, out_reg));
      }
    }
    // Break any cycles in the register moves and emit them in the
    // proper order.
    compute_store_order(_tmp_vmreg);
  }

  // Walk the edges breaking cycles between moves.  The result list
  // can be walked in order to produce the proper set of moves.
  void compute_store_order(VMStorage temp_register) {
    // Record which moves kill which registers
    KillerTable killer; // a map of VMStorage -> MoveOperation*
    for (int i = 0; i < _edges.length(); i++) {
      MoveOperation* s = _edges.at(i);
      assert(!killer.contains(s->dst()),
             "multiple moves with the same register as destination");
      killer.put(s->dst(), s);
    }
    assert(!killer.contains(temp_register),
           "make sure temp isn't in the registers that are killed");

    // create links between loads and stores
    for (int i = 0; i < _edges.length(); i++) {
      _edges.at(i)->link(killer);
    }

    // at this point, all the move operations are chained together
    // in one or more doubly linked lists.  Processing them backwards finds
    // the beginning of the chain, forwards finds the end.  If there's
    // a cycle it can be broken at any point, so pick an edge and walk
    // backward until the list ends or we end where we started.
    for (int e = 0; e < _edges.length(); e++) {
      MoveOperation* s = _edges.at(e);
      if (!s->is_processed()) {
        MoveOperation* start = s;
        // search for the beginning of the chain or cycle
        while (start->prev() != nullptr && start->prev() != s) {
          start = start->prev();
        }
        if (start->prev() == s) {
          start->break_cycle(temp_register);
        }
        // walk the chain forward inserting to store list
        while (start != nullptr) {
          _moves.push(start->as_move());

          start->set_processed();
          start = start->next();
        }
      }
    }
  }

public:
  static GrowableArray<Move> compute_move_order(int total_in_args, const VMStorage* in_regs,
                                                int total_out_args, VMStorage* out_regs,
                                                const BasicType* in_sig_bt, VMStorage tmp_vmreg) {
    ComputeMoveOrder cmo(total_in_args, in_regs, total_out_args, out_regs, in_sig_bt, tmp_vmreg);
    cmo.compute();
    return cmo._moves;
  }
};

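// ArgumentShuffle computes, for a given pair of signatures and calling conventions, the
// ordered list of moves that shuffles incoming arguments into their outgoing locations
// (see ComputeMoveOrder above), and records the number of outgoing stack-argument bytes
// required by the output convention.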
ArgumentShuffle::ArgumentShuffle(
    BasicType* in_sig_bt,
    int num_in_args,
    BasicType* out_sig_bt,
    int num_out_args,
    const CallingConventionClosure* input_conv,
    const CallingConventionClosure* output_conv,
    VMStorage shuffle_temp) {

  VMStorage* in_regs = NEW_RESOURCE_ARRAY(VMStorage, num_in_args);
  input_conv->calling_convention(in_sig_bt, in_regs, num_in_args);

  VMStorage* out_regs = NEW_RESOURCE_ARRAY(VMStorage, num_out_args);
  _out_arg_bytes = output_conv->calling_convention(out_sig_bt, out_regs, num_out_args);

  _moves = ComputeMoveOrder::compute_move_order(num_in_args, in_regs,
                                                num_out_args, out_regs,
                                                in_sig_bt, shuffle_temp);
}