src/hotspot/share/prims/foreign_globals.cpp

 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  */
 23 
 24 #include "precompiled.hpp"
 25 #include "foreign_globals.hpp"
 26 #include "classfile/symbolTable.hpp"
 27 #include "classfile/systemDictionary.hpp"
 28 #include "classfile/vmSymbols.hpp"
 29 #include "memory/resourceArea.hpp"
 30 #include "runtime/fieldDescriptor.hpp"
 31 #include "runtime/fieldDescriptor.inline.hpp"
 32 
 33 #define FOREIGN_ABI "jdk/internal/foreign/abi/"
 34 
 35 static int field_offset(InstanceKlass* cls, const char* fieldname, Symbol* sigsym) {
 36   TempNewSymbol fieldnamesym = SymbolTable::new_symbol(fieldname, (int)strlen(fieldname));
 37   fieldDescriptor fd;
 38   bool success = cls->find_field(fieldnamesym, sigsym, false, &fd);
 39   assert(success, "Field not found");
 40   return fd.offset();
 41 }
 42 
 43 static InstanceKlass* find_InstanceKlass(const char* name, TRAPS) {
 44   TempNewSymbol sym = SymbolTable::new_symbol(name, (int)strlen(name));
 45   Klass* k = SystemDictionary::resolve_or_null(sym, Handle(), Handle(), THREAD);
 46   assert(k != nullptr, "Cannot find class: %s", name);
 47   return InstanceKlass::cast(k);
 48 }
 49 
 50 const ForeignGlobals& ForeignGlobals::instance() {
 51   static ForeignGlobals globals; // thread-safe lazy init-once (since C++11)
 52   return globals;
 53 }
 54 
 55 const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) {
 56   return instance().parse_abi_descriptor_impl(jabi);
 57 }
 58 const BufferLayout ForeignGlobals::parse_buffer_layout(jobject jlayout) {
 59   return instance().parse_buffer_layout_impl(jlayout);
 60 }
 61 
 62 const CallRegs ForeignGlobals::parse_call_regs(jobject jconv) {
 63   return instance().parse_call_regs_impl(jconv);
 64 }
 65 
 66 ForeignGlobals::ForeignGlobals() {
 67   JavaThread* current_thread = JavaThread::current();
 68   ResourceMark rm(current_thread);
 69 
 70   // ABIDescriptor
 71   InstanceKlass* k_ABI = find_InstanceKlass(FOREIGN_ABI "ABIDescriptor", current_thread);
 72   const char* strVMSArrayArray = "[[L" FOREIGN_ABI "VMStorage;";
 73   Symbol* symVMSArrayArray = SymbolTable::new_symbol(strVMSArrayArray);
 74   ABI.inputStorage_offset = field_offset(k_ABI, "inputStorage", symVMSArrayArray);
 75   ABI.outputStorage_offset = field_offset(k_ABI, "outputStorage", symVMSArrayArray);
 76   ABI.volatileStorage_offset = field_offset(k_ABI, "volatileStorage", symVMSArrayArray);
 77   ABI.stackAlignment_offset = field_offset(k_ABI, "stackAlignment", vmSymbols::int_signature());
 78   ABI.shadowSpace_offset = field_offset(k_ABI, "shadowSpace", vmSymbols::int_signature());
 79 
 80   // VMStorage
 81   InstanceKlass* k_VMS = find_InstanceKlass(FOREIGN_ABI "VMStorage", current_thread);
 82   VMS.index_offset = field_offset(k_VMS, "index", vmSymbols::int_signature());
 83   VMS.type_offset = field_offset(k_VMS, "type", vmSymbols::int_signature());
 84 
 85   // BufferLayout
 86   InstanceKlass* k_BL = find_InstanceKlass(FOREIGN_ABI "BufferLayout", current_thread);
 87   BL.size_offset = field_offset(k_BL, "size", vmSymbols::long_signature());
 88   BL.arguments_next_pc_offset = field_offset(k_BL, "arguments_next_pc", vmSymbols::long_signature());
 89   BL.stack_args_bytes_offset = field_offset(k_BL, "stack_args_bytes", vmSymbols::long_signature());
 90   BL.stack_args_offset = field_offset(k_BL, "stack_args", vmSymbols::long_signature());
 91   BL.input_type_offsets_offset = field_offset(k_BL, "input_type_offsets", vmSymbols::long_array_signature());
 92   BL.output_type_offsets_offset = field_offset(k_BL, "output_type_offsets", vmSymbols::long_array_signature());
 93 
 94   // CallRegs
 95   const char* strVMSArray = "[L" FOREIGN_ABI "VMStorage;";
 96   Symbol* symVMSArray = SymbolTable::new_symbol(strVMSArray);
 97   InstanceKlass* k_CC = find_InstanceKlass(FOREIGN_ABI "ProgrammableUpcallHandler$CallRegs", current_thread);
 98   CallConvOffsets.arg_regs_offset = field_offset(k_CC, "argRegs", symVMSArray);
 99   CallConvOffsets.ret_regs_offset = field_offset(k_CC, "retRegs", symVMSArray);
100 }
101 
102 void CallRegs::calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const {
103   int src_pos = 0;
104   for (uint i = 0; i < argcnt; i++) {
105     switch (sig_bt[i]) {
106       case T_BOOLEAN:
107       case T_CHAR:
108       case T_BYTE:
109       case T_SHORT:
110       case T_INT:
111       case T_FLOAT:
112         assert(src_pos < _args_length, "oob");
113         parm_regs[i].set1(_arg_regs[src_pos++]);
114         break;
115       case T_LONG:
116       case T_DOUBLE:
117         assert((i + 1) < argcnt && sig_bt[i + 1] == T_VOID, "expecting half");
118         assert(src_pos < _args_length, "oob");
119         parm_regs[i].set2(_arg_regs[src_pos++]);
120         break;
121       case T_VOID: // Halves of longs and doubles
122         assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
123         parm_regs[i].set_bad();
124         break;
125       default:
126         ShouldNotReachHere();
127         break;
128     }
129   }
130 }
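A note on the T_VOID cases above: HotSpot flattens signatures so that every
T_LONG and T_DOUBLE occupies two slots, with the second slot tagged T_VOID.
That is why the loop asserts sig_bt[i + 1] == T_VOID for 64-bit types and marks
the half slots with set_bad(). A self-contained sketch of the convention, with
HotSpot's BasicType mocked up for illustration:

    #include <cassert>
    #include <cstdio>

    // Mocked stand-ins for HotSpot's BasicType values (illustrative only).
    enum BasicType { T_INT, T_LONG, T_DOUBLE, T_VOID };

    int main() {
      // Java signature (int, long, double) flattened into VM slots: each
      // long/double is followed by a T_VOID marking its upper half.
      BasicType sig_bt[] = { T_INT, T_LONG, T_VOID, T_DOUBLE, T_VOID };
      const int argcnt = sizeof(sig_bt) / sizeof(sig_bt[0]);

      for (int i = 0; i < argcnt; i++) {
        switch (sig_bt[i]) {
          case T_INT:
            std::printf("slot %d: 32-bit value, set1()\n", i);
            break;
          case T_LONG:
          case T_DOUBLE:
            assert(i + 1 < argcnt && sig_bt[i + 1] == T_VOID); // paired half
            std::printf("slot %d: 64-bit value, set2()\n", i);
            break;
          case T_VOID:
            std::printf("slot %d: upper half, set_bad()\n", i);
            break;
        }
      }
      return 0;
    }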

 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  */
 23 
 24 #include "precompiled.hpp"
 25 #include "foreign_globals.hpp"
 26 #include "classfile/symbolTable.hpp"
 27 #include "classfile/systemDictionary.hpp"
 28 #include "classfile/vmSymbols.hpp"
 29 #include "memory/resourceArea.hpp"
 30 #include "prims/foreign_globals.inline.hpp"
 31 #include "runtime/fieldDescriptor.inline.hpp"
 32 #include "runtime/jniHandles.inline.hpp"
 33 
 34 #define FOREIGN_ABI "jdk/internal/foreign/abi/"
 35 
 36 static int field_offset(InstanceKlass* cls, const char* fieldname, Symbol* sigsym) {
 37   TempNewSymbol fieldnamesym = SymbolTable::new_symbol(fieldname, (int)strlen(fieldname));
 38   fieldDescriptor fd;
 39   bool success = cls->find_field(fieldnamesym, sigsym, false, &fd);
 40   assert(success, "Field not found");
 41   return fd.offset();
 42 }
 43 
 44 static InstanceKlass* find_InstanceKlass(const char* name, TRAPS) {
 45   TempNewSymbol sym = SymbolTable::new_symbol(name, (int)strlen(name));
 46   Klass* k = SystemDictionary::resolve_or_null(sym, Handle(), Handle(), THREAD);
 47   assert(k != nullptr, "Cannot find class: %s", name);
 48   return InstanceKlass::cast(k);
 49 }
 50 
 51 const ForeignGlobals& ForeignGlobals::instance() {
 52   static ForeignGlobals globals; // thread-safe lazy init-once (since C++11)
 53   return globals;
 54 }
 55 
 56 const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) {
 57   return instance().parse_abi_descriptor_impl(jabi);
 58 }
 59 
 60 const CallRegs ForeignGlobals::parse_call_regs(jobject jconv) {
 61   return instance().parse_call_regs_impl(jconv);
 62 }
 63 
 64 ForeignGlobals::ForeignGlobals() {
 65   JavaThread* current_thread = JavaThread::current();
 66   ResourceMark rm(current_thread);
 67 
 68   // ABIDescriptor
 69   InstanceKlass* k_ABI = find_InstanceKlass(FOREIGN_ABI "ABIDescriptor", current_thread);
 70   const char* strVMSArrayArray = "[[L" FOREIGN_ABI "VMStorage;";
 71   Symbol* symVMSArrayArray = SymbolTable::new_symbol(strVMSArrayArray);
 72   ABI.inputStorage_offset = field_offset(k_ABI, "inputStorage", symVMSArrayArray);
 73   ABI.outputStorage_offset = field_offset(k_ABI, "outputStorage", symVMSArrayArray);
 74   ABI.volatileStorage_offset = field_offset(k_ABI, "volatileStorage", symVMSArrayArray);
 75   ABI.stackAlignment_offset = field_offset(k_ABI, "stackAlignment", vmSymbols::int_signature());
 76   ABI.shadowSpace_offset = field_offset(k_ABI, "shadowSpace", vmSymbols::int_signature());
 77   const char* strVMS = "L" FOREIGN_ABI "VMStorage;";
 78   Symbol* symVMS = SymbolTable::new_symbol(strVMS);
 79   ABI.targetAddrStorage_offset = field_offset(k_ABI, "targetAddrStorage", symVMS);
 80   ABI.retBufAddrStorage_offset = field_offset(k_ABI, "retBufAddrStorage", symVMS);
 81 
 82   // VMStorage
 83   InstanceKlass* k_VMS = find_InstanceKlass(FOREIGN_ABI "VMStorage", current_thread);
 84   VMS.index_offset = field_offset(k_VMS, "index", vmSymbols::int_signature());
 85   VMS.type_offset = field_offset(k_VMS, "type", vmSymbols::int_signature());
 86 
 87   // CallRegs
 88   const char* strVMSArray = "[L" FOREIGN_ABI "VMStorage;";
 89   Symbol* symVMSArray = SymbolTable::new_symbol(strVMSArray);
 90   InstanceKlass* k_CC = find_InstanceKlass(FOREIGN_ABI "ProgrammableUpcallHandler$CallRegs", current_thread);
 91   CallConvOffsets.arg_regs_offset = field_offset(k_CC, "argRegs", symVMSArray);
 92   CallConvOffsets.ret_regs_offset = field_offset(k_CC, "retRegs", symVMSArray);
 93 }
 94 
 95 const CallRegs ForeignGlobals::parse_call_regs_impl(jobject jconv) const {
 96   oop conv_oop = JNIHandles::resolve_non_null(jconv);
 97   objArrayOop arg_regs_oop = oop_cast<objArrayOop>(conv_oop->obj_field(CallConvOffsets.arg_regs_offset));
 98   objArrayOop ret_regs_oop = oop_cast<objArrayOop>(conv_oop->obj_field(CallConvOffsets.ret_regs_offset));
 99 
100   CallRegs result;
101   result._args_length = arg_regs_oop->length();
102   result._arg_regs = NEW_RESOURCE_ARRAY(VMReg, result._args_length);
103 
104   result._rets_length = ret_regs_oop->length();
105   result._ret_regs = NEW_RESOURCE_ARRAY(VMReg, result._rets_length);
106 
107   for (int i = 0; i < result._args_length; i++) {
108     result._arg_regs[i] = parse_vmstorage(arg_regs_oop->obj_at(i));
109   }
110 
111   for (int i = 0; i < result._rets_length; i++) {
112     result._ret_regs[i] = parse_vmstorage(ret_regs_oop->obj_at(i));
113   }
114 
115   return result;
116 }
117 
118 VMReg ForeignGlobals::parse_vmstorage(oop storage) const {
119   jint index = storage->int_field(VMS.index_offset);
120   jint type = storage->int_field(VMS.type_offset);
121   return vmstorage_to_vmreg(type, index);
122 }
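parse_vmstorage above hands the Java-side (type, index) pair to the
platform-specific vmstorage_to_vmreg. As a rough illustration of what such a
decoder does, here is a hypothetical self-contained version; the storage
classes and register tables are invented for the example and are not HotSpot's:

    #include <cstdio>

    // Invented storage classes; real values are defined per platform ABI.
    enum StorageType { INTEGER = 0, VECTOR = 1, STACK = 2 };

    // Invented register tables standing in for a platform's register sets.
    static const char* const int_regs[] = { "r0", "r1", "r2", "r3" };
    static const char* const vec_regs[] = { "v0", "v1", "v2", "v3" };

    // Decodes a (type, index) pair, as stored in a VMStorage object, to a
    // register name; the real vmstorage_to_vmreg returns a VMReg instead.
    static const char* storage_to_name(int type, int index) {
      switch (type) {
        case INTEGER: return (index >= 0 && index < 4) ? int_regs[index] : "bad";
        case VECTOR:  return (index >= 0 && index < 4) ? vec_regs[index] : "bad";
        case STACK:   return "stack slot";
        default:      return "bad";
      }
    }

    int main() {
      std::printf("%s\n", storage_to_name(INTEGER, 2)); // r2
      std::printf("%s\n", storage_to_name(VECTOR, 0));  // v0
      return 0;
    }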
123 
124 int RegSpiller::compute_spill_area(const VMReg* regs, int num_regs) {
125   int result_size = 0;
126   for (int i = 0; i < num_regs; i++) {
127     result_size += pd_reg_size(regs[i]);
128   }
129   return result_size;
130 }
131 
132 void RegSpiller::generate(MacroAssembler* masm, int rsp_offset, bool spill) const {
133   assert(rsp_offset != -1, "rsp_offset should be set");
134   int offset = rsp_offset;
135   for (int i = 0; i < _num_regs; i++) {
136     VMReg reg = _regs[i];
137     if (spill) {
138       pd_store_reg(masm, offset, reg);
139     } else {
140       pd_load_reg(masm, offset, reg);
141     }
142     offset += pd_reg_size(reg);
143   }
144 }
145 
146 void ArgumentShuffle::print_on(outputStream* os) const {
147   os->print_cr("Argument shuffle {");
148   for (int i = 0; i < _moves.length(); i++) {
149     Move move = _moves.at(i);
150     BasicType arg_bt     = move.bt;
151     VMRegPair from_vmreg = move.from;
152     VMRegPair to_vmreg   = move.to;
153 
154     os->print("Move a %s from (", null_safe_string(type2name(arg_bt)));
155     from_vmreg.first()->print_on(os);
156     os->print(",");
157     from_vmreg.second()->print_on(os);
158     os->print(") to (");
159     to_vmreg.first()->print_on(os);
160     os->print(",");
161     to_vmreg.second()->print_on(os);
162     os->print_cr(")");
163   }
164   os->print_cr("Stack argument slots: %d", _out_arg_stack_slots);
165   os->print_cr("}");
166 }
167 
168 int NativeCallConv::calling_convention(BasicType* sig_bt, VMRegPair* out_regs, int num_args) const {
169   int src_pos = 0;
170   int stk_slots = 0;
171   for (int i = 0; i < num_args; i++) {
172     switch (sig_bt[i]) {
173       case T_BOOLEAN:
174       case T_CHAR:
175       case T_BYTE:
176       case T_SHORT:
177       case T_INT:
178       case T_FLOAT: {
179         assert(src_pos < _input_regs_length, "oob");
180         VMReg reg = _input_regs[src_pos++];
181         out_regs[i].set1(reg);
182         if (reg->is_stack())
183           stk_slots += 2;
184         break;
185       }
186       case T_LONG:
187       case T_DOUBLE: {
188         assert((i + 1) < num_args && sig_bt[i + 1] == T_VOID, "expecting half");
189         assert(src_pos < _input_regs_length, "oob");
190         VMReg reg = _input_regs[src_pos++];
191         out_regs[i].set2(reg);
192         if (reg->is_stack())
193           stk_slots += 2;
194         break;
195       }
196       case T_VOID: // Halves of longs and doubles
197         assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
198         out_regs[i].set_bad();
199         break;
200       default:
201         ShouldNotReachHere();
202         break;
203     }
204   }
205   return stk_slots;
206 }
207 
208 // Based on ComputeMoveOrder from the x86_64 shared runtime code,
209 // with some changes.
210 class ForeignCMO: public StackObj {
211   class MoveOperation: public ResourceObj {
212     friend class ForeignCMO;
213    private:
214     VMRegPair        _src;
215     VMRegPair        _dst;
216     bool             _processed;
217     MoveOperation*   _next;
218     MoveOperation*   _prev;
219     BasicType        _bt;
220 
221     static int get_id(VMRegPair r) {
222       return r.first()->value();
223     }
224 
225    public:
226     MoveOperation(VMRegPair src, VMRegPair dst, BasicType bt):
227       _src(src)
228     , _dst(dst)
229     , _processed(false)
230     , _next(NULL)
231     , _prev(NULL)
232     , _bt(bt) {
233     }
234 
235     int src_id() const          { return get_id(_src); }
236     int dst_id() const          { return get_id(_dst); }
237     MoveOperation* next() const { return _next; }
238     MoveOperation* prev() const { return _prev; }
239     void set_processed()        { _processed = true; }
240     bool is_processed() const   { return _processed; }
241 
242     // Break the cycle by inserting a store through temp_register.
243     void break_cycle(VMRegPair temp_register) {
244       // create a new store following the last store
245       // to move from the temp_register to the original
246       MoveOperation* new_store = new MoveOperation(temp_register, _dst, _bt);
247 
248       // break the cycle of links and insert new_store at the end
249       // break the reverse link.
250       MoveOperation* p = prev();
251       assert(p->next() == this, "must be");
252       _prev = NULL;
253       p->_next = new_store;
254       new_store->_prev = p;
255 
256       // change the original store to save its value in the temp.
257       _dst = temp_register;
258     }
259 
260     void link(GrowableArray<MoveOperation*>& killer) {
261       // link this store in front the store that it depends on
262       MoveOperation* n = killer.at_grow(src_id(), NULL);
263       if (n != NULL) {
264         assert(_next == NULL && n->_prev == NULL, "shouldn't have been set yet");
265         _next = n;
266         n->_prev = this;
267       }
268     }
269 
270     Move as_move() {
271       return {_bt, _src, _dst};
272     }
273   };
274 
275  private:
276   GrowableArray<MoveOperation*> _edges;
277   GrowableArray<Move> _moves;
278 
279  public:
280   ForeignCMO(int total_in_args, const VMRegPair* in_regs, int total_out_args, VMRegPair* out_regs,
281              const BasicType* in_sig_bt, VMRegPair tmp_vmreg) : _edges(total_in_args), _moves(total_in_args) {
282     assert(total_out_args >= total_in_args, "can only add prefix args");
283     // Note that total_out_args args can be greater than total_in_args in the case of upcalls.
284     // There will be a leading MH receiver arg in the out args in that case.
285     //
286     // Leading args in the out args will be ignored below because we iterate from the end of
287     // the register arrays until in_idx < 0, and total_in_args is smaller.
288     //
289     // Stub code adds a move for the receiver to j_rarg0 (and potential other prefix args) manually.
290     for (int in_idx = total_in_args - 1, out_idx = total_out_args - 1; in_idx >= 0; in_idx--, out_idx--) {
291       BasicType bt = in_sig_bt[in_idx];
292       assert(bt != T_ARRAY, "array not expected");
293       VMRegPair in_reg = in_regs[in_idx];
294       VMRegPair out_reg = out_regs[out_idx];
295 
296       if (out_reg.first()->is_stack()) {
297         // Move operations where the dest is the stack can all be
298         // scheduled first since they can't interfere with the other moves.
299         // The input and output stack spaces are distinct from each other.
300         Move move{bt, in_reg, out_reg};
301         _moves.push(move);
302       } else if (in_reg.first() == out_reg.first()
303                  || bt == T_VOID) {
304         // 1. Can skip non-stack identity moves.
305         //
306         // 2. Upper half of long or double (T_VOID).
307         //    Don't need to do anything.
308         continue;
309       } else {
310         _edges.append(new MoveOperation(in_reg, out_reg, bt));
311       }
312     }
313     // Break any cycles in the register moves and emit them in the
314     // proper order.
315     compute_store_order(tmp_vmreg);
316   }
317 
318   // Walk the edges breaking cycles between moves.  The result list
319     // can be walked in order to produce the proper set of loads.
320   void compute_store_order(VMRegPair temp_register) {
321     // Record which moves kill which values
322     GrowableArray<MoveOperation*> killer; // essentially a map of register id -> MoveOperation*
323     for (int i = 0; i < _edges.length(); i++) {
324       MoveOperation* s = _edges.at(i);
325       assert(killer.at_grow(s->dst_id(), NULL) == NULL,
326              "multiple moves with the same register as destination");
327       killer.at_put_grow(s->dst_id(), s, NULL);
328     }
329     assert(killer.at_grow(MoveOperation::get_id(temp_register), NULL) == NULL,
330            "make sure temp isn't in the registers that are killed");
331 
332     // create links between loads and stores
333     for (int i = 0; i < _edges.length(); i++) {
334       _edges.at(i)->link(killer);
335     }
336 
337     // at this point, all the move operations are chained together
338     // in one or more doubly linked lists.  Processing them backwards finds
339     // the beginning of the chain, forwards finds the end.  If there's
340     // a cycle it can be broken at any point, so pick an edge and walk
341     // backward until the list ends or we end where we started.
342     for (int e = 0; e < _edges.length(); e++) {
343       MoveOperation* s = _edges.at(e);
344       if (!s->is_processed()) {
345         MoveOperation* start = s;
346         // search for the beginning of the chain or cycle
347         while (start->prev() != NULL && start->prev() != s) {
348           start = start->prev();
349         }
350         if (start->prev() == s) {
351           start->break_cycle(temp_register);
352         }
353         // walk the chain forward, inserting into the store list
354         while (start != NULL) {
355           _moves.push(start->as_move());
356 
357           start->set_processed();
358           start = start->next();
359         }
360       }
361     }
362   }
363 
364   GrowableArray<Move> moves() {
365     return _moves;
366   }
367 };
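The cycle handling in compute_store_order is easiest to see on a concrete
example: the swap r0 -> r1, r1 -> r0 cannot be ordered directly, so one value
is parked in the temp register first. Below is a toy self-contained version of
the same idea; the simplified ordering loop is an editor's sketch, not the
ForeignCMO algorithm above:

    #include <cstdio>
    #include <vector>

    struct Move { int src, dst; };

    // Orders moves so no destination is overwritten before it is read,
    // breaking cycles through the temp register.
    static std::vector<Move> order_moves(std::vector<Move> moves, int temp) {
      std::vector<Move> out;
      bool progress = true;
      while (!moves.empty()) {
        if (!progress) {
          // Every remaining move is part of a cycle: detach one through temp.
          Move m = moves.back();
          moves.back() = { temp, m.dst }; // finish the cycle from temp later
          out.push_back({ m.src, temp }); // park the endangered value first
        }
        progress = false;
        for (int i = 0; i < (int)moves.size(); i++) {
          bool clobbers = false; // would this move overwrite a pending source?
          for (int j = 0; j < (int)moves.size(); j++) {
            if (j != i && moves[j].src == moves[i].dst) clobbers = true;
          }
          if (!clobbers) {
            out.push_back(moves[i]);
            moves.erase(moves.begin() + i);
            progress = true;
            break;
          }
        }
      }
      return out;
    }

    int main() {
      // The classic swap cycle: r0 -> r1 and r1 -> r0, temp register 99.
      std::vector<Move> cycle = { {0, 1}, {1, 0} };
      for (const Move& m : order_moves(cycle, 99)) {
        std::printf("move r%d -> r%d\n", m.src, m.dst);
      }
      // Safe order printed: r1 -> r99, then r0 -> r1, then r99 -> r0.
      return 0;
    }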
368 
369 ArgumentShuffle::ArgumentShuffle(
370     BasicType* in_sig_bt,
371     int num_in_args,
372     BasicType* out_sig_bt,
373     int num_out_args,
374     const CallConvClosure* input_conv,
375     const CallConvClosure* output_conv,
376     VMReg shuffle_temp) {
377 
378   VMRegPair* in_regs = NEW_RESOURCE_ARRAY(VMRegPair, num_in_args);
379   input_conv->calling_convention(in_sig_bt, in_regs, num_in_args);
380 
381   VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, num_out_args);
382   _out_arg_stack_slots = output_conv->calling_convention(out_sig_bt, out_regs, num_out_args);
383 
384   VMRegPair tmp_vmreg;
385   tmp_vmreg.set2(shuffle_temp);
386 
387   // Compute a valid move order, using tmp_vmreg to break any cycles.
388   // Note that ForeignCMO ignores the upper half of our VMRegPairs.
389   // We are not moving Java values here, only register-sized values,
390   // so we shouldn't have to worry about the upper half anyway.
391   // This should work fine on 32-bit as well, since we would only be
392   // moving 32-bit sized values (i.e. low-level MH shouldn't take any double/long).
393   ForeignCMO order(num_in_args, in_regs,
394                    num_out_args, out_regs,
395                    in_sig_bt, tmp_vmreg);
396   _moves = order.moves();
397 }