1 /*
2 * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 */
23
24 #include "classfile/javaClasses.hpp"
25 #include "memory/resourceArea.hpp"
26 #include "prims/foreignGlobals.inline.hpp"
27 #include "runtime/jniHandles.inline.hpp"
28 #include "utilities/hashTable.hpp"
29
30 StubLocations::StubLocations() {
31 for (uint32_t i = 0; i < LOCATION_LIMIT; i++) {
32 _locs[i] = VMStorage::invalid();
33 }
34 }
35
// Register the storage backing the given stub location slot.
void StubLocations::set(uint32_t loc, VMStorage storage) {
  assert(loc < LOCATION_LIMIT, "oob");
  _locs[loc] = storage;
}
40
// Register a location that lives in the stub's frame at the given offset.
// The '8' is VMStorage's segment_mask_or_size argument — presumably the
// slot size in bytes for FRAME_DATA storage; confirm against VMStorage.
void StubLocations::set_frame_data(uint32_t loc, int offset) {
  set(loc, VMStorage(StorageType::FRAME_DATA, 8, offset));
}
44
45 VMStorage StubLocations::get(uint32_t loc) const {
46 assert(loc < LOCATION_LIMIT, "oob");
47 VMStorage storage = _locs[loc];
48 assert(storage.is_valid(), "not set");
49 return storage;
50 }
51
// Resolve a PLACEHOLDER storage to the concrete storage registered for it;
// the placeholder's index is the stub location slot number.
VMStorage StubLocations::get(VMStorage placeholder) const {
  assert(placeholder.type() == StorageType::PLACEHOLDER, "must be");
  return get(placeholder.index());
}
56
// Return the frame offset of a location that was registered with
// set_frame_data(). Asserts the slot actually holds FRAME_DATA storage.
int StubLocations::data_offset(uint32_t loc) const {
  VMStorage storage = get(loc);
  assert(storage.type() == StorageType::FRAME_DATA, "must be");
  return storage.offset();
}
62
63 #define FOREIGN_ABI "jdk/internal/foreign/abi/"
64
65 const CallRegs ForeignGlobals::parse_call_regs(jobject jconv) {
66 oop conv_oop = JNIHandles::resolve_non_null(jconv);
67 refArrayOop arg_regs_oop = jdk_internal_foreign_abi_CallConv::argRegs(conv_oop);
68 refArrayOop ret_regs_oop = jdk_internal_foreign_abi_CallConv::retRegs(conv_oop);
69 int num_args = arg_regs_oop->length();
70 int num_rets = ret_regs_oop->length();
71 CallRegs result(num_args, num_rets);
72
73 for (int i = 0; i < num_args; i++) {
74 result._arg_regs.push(parse_vmstorage(arg_regs_oop->obj_at(i)));
75 }
76
77 for (int i = 0; i < num_rets; i++) {
78 result._ret_regs.push(parse_vmstorage(ret_regs_oop->obj_at(i)));
79 }
80
81 return result;
82 }
83
84 VMStorage ForeignGlobals::parse_vmstorage(oop storage) {
85 jbyte type = jdk_internal_foreign_abi_VMStorage::type(storage);
86 jshort segment_mask_or_size = jdk_internal_foreign_abi_VMStorage::segment_mask_or_size(storage);
87 jint index_or_offset = jdk_internal_foreign_abi_VMStorage::index_or_offset(storage);
88
89 return VMStorage(static_cast<StorageType>(type), segment_mask_or_size, index_or_offset);
90 }
91
92 int RegSpiller::compute_spill_area(const GrowableArray<VMStorage>& regs) {
93 int result_size = 0;
94 for (int i = 0; i < regs.length(); i++) {
95 result_size += pd_reg_size(regs.at(i));
96 }
97 return result_size;
98 }
99
100 void RegSpiller::generate(MacroAssembler* masm, int rsp_offset, bool spill) const {
101 assert(rsp_offset != -1, "rsp_offset should be set");
102 int offset = rsp_offset;
103 for (int i = 0; i < _regs.length(); i++) {
104 VMStorage reg = _regs.at(i);
105 if (spill) {
106 pd_store_reg(masm, offset, reg);
107 } else {
108 pd_load_reg(masm, offset, reg);
109 }
110 offset += pd_reg_size(reg);
111 }
112 }
113
114 void ArgumentShuffle::print_on(outputStream* os) const {
115 os->print_cr("Argument shuffle {");
116 for (int i = 0; i < _moves.length(); i++) {
117 Move move = _moves.at(i);
118 VMStorage from_reg = move.from;
119 VMStorage to_reg = move.to;
120
121 os->print("Move from ");
122 from_reg.print_on(os);
123 os->print(" to ");
124 to_reg.print_on(os);
125 os->print_cr("");
126 }
127 os->print_cr("}");
128 }
129
130 int ForeignGlobals::compute_out_arg_bytes(const GrowableArray<VMStorage>& out_regs) {
131 uint32_t max_stack_offset = 0;
132 for (VMStorage reg : out_regs) {
133 if (reg.is_stack())
134 max_stack_offset = MAX2(max_stack_offset, reg.offset() + reg.stack_size());
135 }
136 return align_up(max_stack_offset, 8);
137 }
138
139 int ForeignGlobals::java_calling_convention(const BasicType* signature, int num_args, GrowableArray<VMStorage>& out_regs) {
140 VMRegPair* vm_regs = NEW_RESOURCE_ARRAY(VMRegPair, num_args);
141 int slots = align_up(SharedRuntime::java_calling_convention(signature, vm_regs, num_args), 2);
142 for (int i = 0; i < num_args; i++) {
143 VMRegPair pair = vm_regs[i];
144 // note, we ignore second here. Signature should consist of register-size values. So there should be
145 // no need for multi-register pairs.
146 if (signature[i] != T_VOID) {
147 out_regs.push(as_VMStorage(pair.first(), signature[i]));
148 }
149 }
150 return slots << LogBytesPerInt;
151 }
152
153 GrowableArray<VMStorage> ForeignGlobals::replace_place_holders(const GrowableArray<VMStorage>& regs, const StubLocations& locs) {
154 GrowableArray<VMStorage> result(regs.length());
155 for (VMStorage reg : regs) {
156 result.push(reg.type() == StorageType::PLACEHOLDER ? locs.get(reg) : reg);
157 }
158 return result;
159 }
160
161 GrowableArray<VMStorage> ForeignGlobals::upcall_filter_receiver_reg(const GrowableArray<VMStorage>& unfiltered_regs) {
162 GrowableArray<VMStorage> out(unfiltered_regs.length() - 1);
163 // drop first arg reg
164 for (int i = 1; i < unfiltered_regs.length(); i++) {
165 out.push(unfiltered_regs.at(i));
166 }
167 return out;
168 }
169
170 GrowableArray<VMStorage> ForeignGlobals::downcall_filter_offset_regs(const GrowableArray<VMStorage>& regs,
171 BasicType* signature, int num_args,
172 bool& has_objects) {
173 GrowableArray<VMStorage> result(regs.length());
174 int reg_idx = 0;
175 for (int sig_idx = 0; sig_idx < num_args; sig_idx++) {
176 if (signature[sig_idx] == T_VOID) {
177 continue; // ignore upper halves
178 }
179
180 result.push(regs.at(reg_idx++));
181 if (signature[sig_idx] == T_OBJECT) {
182 has_objects = true;
183 sig_idx++; // skip offset
184 reg_idx++;
185 }
186 }
187 return result;
188 }
189
// Orders the moves of an argument shuffle so that no move clobbers the
// source of a move that has not executed yet. Register-to-register moves
// are chained into doubly linked lists ("this move must run before the move
// that overwrites its source"); any cycle among registers is broken by
// routing one move through a temporary register. Stack-destined moves are
// scheduled first since the in/out stack areas are distinct.
class ArgumentShuffle::ComputeMoveOrder: public StackObj {
  class MoveOperation;

  // segment_mask_or_size is not taken into account since
  // VMStorages that differ only in mask or size can still
  // conflict
  static inline unsigned hash(const VMStorage& vms) {
    return static_cast<unsigned int>(vms.type()) ^ vms.index_or_offset();
  }
  static inline bool equals(const VMStorage& a, const VMStorage& b) {
    return a.type() == b.type() && a.index_or_offset() == b.index_or_offset();
  }

  // Maps a storage location to the move that overwrites ("kills") it.
  using KillerTable = HashTable<
    VMStorage, MoveOperation*,
    32, // doesn't need to be big. don't have that many argument registers (in known ABIs)
    AnyObj::RESOURCE_AREA,
    mtInternal,
    ComputeMoveOrder::hash,
    ComputeMoveOrder::equals
  >;

  // A single src -> dst move. _next points to the move that kills our
  // source (it must run after us); _prev points to the move whose source
  // we kill (it must run before us).
  class MoveOperation: public ResourceObj {
    friend class ComputeMoveOrder;
  private:
    VMStorage _src;
    VMStorage _dst;
    bool _processed;       // already emitted to the output move list?
    MoveOperation* _next;
    MoveOperation* _prev;

  public:
    MoveOperation(VMStorage src, VMStorage dst):
      _src(src), _dst(dst), _processed(false), _next(nullptr), _prev(nullptr) {}

    const VMStorage& src() const { return _src; }
    const VMStorage& dst() const { return _dst; }
    MoveOperation* next() const { return _next; }
    MoveOperation* prev() const { return _prev; }
    void set_processed() { _processed = true; }
    bool is_processed() const { return _processed; }

    // insert
    void break_cycle(VMStorage temp_register) {
      // create a new store following the last store
      // to move from the temp_register to the original
      MoveOperation* new_store = new MoveOperation(temp_register, _dst);

      // break the cycle of links and insert new_store at the end
      // break the reverse link.
      MoveOperation* p = prev();
      assert(p->next() == this, "must be");
      _prev = nullptr;
      p->_next = new_store;
      new_store->_prev = p;

      // change the original store to save its value in the temp.
      _dst = temp_register;
    }

    void link(KillerTable& killer) {
      // link this store in front of the store that it depends on
      MoveOperation** n = killer.get(_src);
      if (n != nullptr) {
        MoveOperation* src_killer = *n;
        assert(_next == nullptr && src_killer->_prev == nullptr, "shouldn't have been set yet");
        _next = src_killer;
        src_killer->_prev = this;
      }
    }

    Move as_move() {
      return {_src, _dst};
    }
  };

private:
  const GrowableArray<VMStorage>& _in_regs;
  const GrowableArray<VMStorage>& _out_regs;
  VMStorage _tmp_vmreg;                    // scratch register used to break cycles
  GrowableArray<MoveOperation*> _edges;    // register-to-register moves still to be ordered
  GrowableArray<Move> _moves;              // the final, ordered move list

public:
  ComputeMoveOrder(const GrowableArray<VMStorage>& in_regs,
                   const GrowableArray<VMStorage>& out_regs,
                   VMStorage tmp_vmreg) :
      _in_regs(in_regs),
      _out_regs(out_regs),
      _tmp_vmreg(tmp_vmreg),
      _edges(in_regs.length()),
      _moves(in_regs.length()) {
    assert(in_regs.length() == out_regs.length(),
      "stray registers? %d != %d", in_regs.length(), out_regs.length());
  }

  // Partition the requested moves: stack-destined moves go straight to the
  // output, identity moves are dropped, and the rest become edges to order.
  void compute() {
    for (int i = 0; i < _in_regs.length(); i++) {
      VMStorage in_reg = _in_regs.at(i);
      VMStorage out_reg = _out_regs.at(i);

      if (out_reg.is_stack() || out_reg.is_frame_data()) {
        // Move operations where the dest is the stack can all be
        // scheduled first since they can't interfere with the other moves.
        // The input and output stack spaces are distinct from each other.
        Move move{in_reg, out_reg};
        _moves.push(move);
      } else if (in_reg == out_reg) {
        // Can skip non-stack identity moves.
        continue;
      } else {
        _edges.append(new MoveOperation(in_reg, out_reg));
      }
    }
    // Break any cycles in the register moves and emit them in the
    // proper order.
    compute_store_order();
  }

  // Walk the edges breaking cycles between moves. The result list
  // can be walked in order to produce the proper set of loads
  void compute_store_order() {
    // Record which moves kill which registers
    KillerTable killer; // a map of VMStorage -> MoveOperation*
    for (int i = 0; i < _edges.length(); i++) {
      MoveOperation* s = _edges.at(i);
      assert(!killer.contains(s->dst()),
             "multiple moves with the same register as destination");
      killer.put(s->dst(), s);
    }
    assert(!killer.contains(_tmp_vmreg),
           "make sure temp isn't in the registers that are killed");

    // create links between loads and stores
    for (int i = 0; i < _edges.length(); i++) {
      _edges.at(i)->link(killer);
    }

    // at this point, all the move operations are chained together
    // in one or more doubly linked lists. Processing them backwards finds
    // the beginning of the chain, forwards finds the end. If there's
    // a cycle it can be broken at any point, so pick an edge and walk
    // backward until the list ends or we end where we started.
    for (int e = 0; e < _edges.length(); e++) {
      MoveOperation* s = _edges.at(e);
      if (!s->is_processed()) {
        MoveOperation* start = s;
        // search for the beginning of the chain or cycle
        while (start->prev() != nullptr && start->prev() != s) {
          start = start->prev();
        }
        if (start->prev() == s) {
          // we wrapped around: this chain is a cycle; break it at 'start'
          start->break_cycle(_tmp_vmreg);
        }
        // walk the chain forward inserting to store list
        while (start != nullptr) {
          _moves.push(start->as_move());

          start->set_processed();
          start = start->next();
        }
      }
    }
  }

public:
  // Entry point: compute the safe ordering of moves from in_regs to
  // out_regs, using tmp_vmreg to break any register cycles.
  static GrowableArray<Move> compute_move_order(const GrowableArray<VMStorage>& in_regs,
                                                const GrowableArray<VMStorage>& out_regs,
                                                VMStorage tmp_vmreg) {
    ComputeMoveOrder cmo(in_regs, out_regs, tmp_vmreg);
    cmo.compute();
    return cmo._moves;
  }
};
364
// Compute the ordered sequence of moves that shuffles arguments from
// in_regs to out_regs, using shuffle_temp to break any register cycles.
ArgumentShuffle::ArgumentShuffle(
    const GrowableArray<VMStorage>& in_regs,
    const GrowableArray<VMStorage>& out_regs,
    VMStorage shuffle_temp) {
  _moves = ComputeMoveOrder::compute_move_order(in_regs, out_regs, shuffle_temp);
}