1 /*
  2  * Copyright (c) 2020, 2025, SAP SE. All rights reserved.
  3  * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  */
 24 
 25 #include "asm/macroAssembler.inline.hpp"
 26 #include "code/vmreg.inline.hpp"
 27 #include "runtime/jniHandles.hpp"
 28 #include "runtime/jniHandles.inline.hpp"
 29 #include "oops/typeArrayOop.inline.hpp"
 30 #include "oops/oopCast.inline.hpp"
 31 #include "prims/foreignGlobals.hpp"
 32 #include "prims/foreignGlobals.inline.hpp"
 33 #include "prims/vmstorage.hpp"
 34 #include "utilities/formatBuffer.hpp"
 35 
 36 #define __ masm->
 37 
// The FFM (Panama) downcall/upcall linker is implemented for this platform (PPC64).
bool ForeignGlobals::is_foreign_linker_supported() {
  return true;
}
 41 
// Deserialize a Java-side jdk.internal.foreign.abi.ABIDescriptor object into the
// VM-side ABIDescriptor struct: argument/return register sets, stack alignment,
// shadow (linkage) space, and the two scratch registers the stubs may clobber.
const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) {
  oop abi_oop = JNIHandles::resolve_non_null(jabi);
  ABIDescriptor abi;

  // inputStorage is indexed by storage type; pick out the integer and float
  // argument-passing registers.
  refArrayOop inputStorage = jdk_internal_foreign_abi_ABIDescriptor::inputStorage(abi_oop);
  parse_register_array(inputStorage, StorageType::INTEGER, abi._integer_argument_registers, as_Register);
  parse_register_array(inputStorage, StorageType::FLOAT, abi._float_argument_registers, as_FloatRegister);

  // Same layout for the return-value registers.
  refArrayOop outputStorage = jdk_internal_foreign_abi_ABIDescriptor::outputStorage(abi_oop);
  parse_register_array(outputStorage, StorageType::INTEGER, abi._integer_return_registers, as_Register);
  parse_register_array(outputStorage, StorageType::FLOAT, abi._float_return_registers, as_FloatRegister);

  abi._stack_alignment_bytes = jdk_internal_foreign_abi_ABIDescriptor::stackAlignment(abi_oop);
  abi._shadow_space_bytes = jdk_internal_foreign_abi_ABIDescriptor::shadowSpace(abi_oop);

  // Scratch registers reserved by the Java-side CallArranger for stub-internal use.
  abi._scratch1 = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::scratch1(abi_oop));
  abi._scratch2 = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::scratch2(abi_oop));

  return abi;
}
 63 
 64 int RegSpiller::pd_reg_size(VMStorage reg) {
 65   if (reg.type() == StorageType::INTEGER || reg.type() == StorageType::FLOAT) {
 66     return 8;
 67   }
 68   return 0; // stack and BAD
 69 }
 70 
 71 void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMStorage reg) {
 72   if (reg.type() == StorageType::INTEGER) {
 73     __ std(as_Register(reg), offset, R1_SP);
 74   } else if (reg.type() == StorageType::FLOAT) {
 75     __ stfd(as_FloatRegister(reg), offset, R1_SP);
 76   } else {
 77     // stack and BAD
 78   }
 79 }
 80 
 81 void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMStorage reg) {
 82   if (reg.type() == StorageType::INTEGER) {
 83     __ ld(as_Register(reg), offset, R1_SP);
 84   } else if (reg.type() == StorageType::FLOAT) {
 85     __ lfd(as_FloatRegister(reg), offset, R1_SP);
 86   } else {
 87     // stack and BAD
 88   }
 89 }
 90 
 91 static int reg2offset(VMStorage vms, int stk_bias) {
 92   assert(!vms.is_reg(), "wrong usage");
 93   return vms.index_or_offset() + stk_bias;
 94 }
 95 
// Emit code to move a value currently held in a GP register (from_reg) into
// to_reg, which may be a GP register, an FP register, or a stack/frame slot.
// 32-bit integer values are sign-extended to 64 bits where the C ABI requires it.
static void move_reg64(MacroAssembler* masm, int out_stk_bias,
                       VMStorage from_reg, VMStorage to_reg) {
  int out_bias = 0;
  switch (to_reg.type()) {
    case StorageType::INTEGER:
      if (to_reg.segment_mask() == REG64_MASK && from_reg.segment_mask() == REG32_MASK) {
        // see CCallingConventionRequiresIntsAsLongs
        __ extsw(as_Register(to_reg), as_Register(from_reg));
      } else {
        __ mr_if_needed(as_Register(to_reg), as_Register(from_reg));
      }
      break;
    case StorageType::FLOAT:
      // FP arguments can get passed in GP reg! (Only in Upcall with HFA usage.)
      assert(from_reg.segment_mask() == to_reg.segment_mask(), "sanity");
      if (to_reg.segment_mask() == REG32_MASK) {
        // 32-bit float: bounce through a scratch slot just below SP, reloading
        // with lfs which widens the single-precision bits into the FP register.
        __ stw(as_Register(from_reg), -8, R1_SP);
        __ lfs(as_FloatRegister(to_reg), -8, R1_SP); // convert to double precision format
      } else {
        // 64-bit: direct GPR -> FPR move.
        __ mtfprd(as_FloatRegister(to_reg), as_Register(from_reg));
      }
      break;
    case StorageType::STACK:
      // STACK differs from FRAME_DATA only by the extra output bias;
      // intentional fallthrough into the shared store path below.
      out_bias = out_stk_bias; // fallthrough
    case StorageType::FRAME_DATA: {
      // Integer types always get a 64 bit slot in C.
      Register storeval = as_Register(from_reg);
      if (from_reg.segment_mask() == REG32_MASK) {
        // see CCallingConventionRequiresIntsAsLongs
        __ extsw(R0, as_Register(from_reg));
        storeval = R0;
      }
      switch (to_reg.stack_size()) {
        case 8: __ std(storeval, reg2offset(to_reg, out_bias), R1_SP); break;
        case 4: __ stw(storeval, reg2offset(to_reg, out_bias), R1_SP); break;
        default: ShouldNotReachHere();
      }
    } break;
    default: ShouldNotReachHere();
  }
}
137 
// Emit code to move a floating-point value held in an FP register (from_reg)
// into to_reg: another FP register, a GP register (VarArgs case), or a stack
// slot. Single-precision values are narrowed/widened via a scratch slot below SP.
static void move_float(MacroAssembler* masm, int out_stk_bias,
                       VMStorage from_reg, VMStorage to_reg) {
  switch (to_reg.type()) {
    case StorageType::INTEGER:
      // FP arguments can get passed in GP reg! (Only for VarArgs for which we don't use FP regs.)
      assert(from_reg.segment_mask() == to_reg.segment_mask(), "sanity");
      if (from_reg.segment_mask() == REG32_MASK) {
        // 32-bit: stfs narrows to single-precision bits, then reload as integer.
        __ stfs(as_FloatRegister(from_reg), -8, R1_SP); // convert to single precision format
        __ lwa(as_Register(to_reg), -8, R1_SP);
      } else {
        // 64-bit: direct FPR -> GPR move.
        __ mffprd(as_Register(to_reg), as_FloatRegister(from_reg));
      }
      break;
    case StorageType::FLOAT:
      __ fmr_if_needed(as_FloatRegister(to_reg), as_FloatRegister(from_reg));
      break;
    case StorageType::STACK:
      if (from_reg.segment_mask() == REG32_MASK) {
        assert(to_reg.stack_size() == 4, "size should match");
        // Note: Argument::float_on_stack_offset_in_bytes_c is handled by CallArranger
        __ stfs(as_FloatRegister(from_reg), reg2offset(to_reg, out_stk_bias), R1_SP);
      } else {
        assert(to_reg.stack_size() == 8, "size should match");
        __ stfd(as_FloatRegister(from_reg), reg2offset(to_reg, out_stk_bias), R1_SP);
      }
      break;
    default: ShouldNotReachHere();
  }
}
167 
// Emit code to move a value from a caller stack slot (from_reg, addressed off
// callerSP) into to_reg: a GP register, an FP register, or an output stack/frame
// slot. Stack-to-stack copies go through the R0 scratch register.
static void move_stack(MacroAssembler* masm, Register callerSP, int in_stk_bias, int out_stk_bias,
                       VMStorage from_reg, VMStorage to_reg) {
  int out_bias = 0;
  switch (to_reg.type()) {
    case StorageType::INTEGER:
      // lwa sign-extends 32-bit slots to 64 bits.
      switch (from_reg.stack_size()) {
        case 8: __ ld( as_Register(to_reg), reg2offset(from_reg, in_stk_bias), callerSP); break;
        case 4: __ lwa(as_Register(to_reg), reg2offset(from_reg, in_stk_bias), callerSP); break;
        default: ShouldNotReachHere();
      }
      break;
    case StorageType::FLOAT:
      switch (from_reg.stack_size()) {
        case 8: __ lfd(as_FloatRegister(to_reg), reg2offset(from_reg, in_stk_bias), callerSP); break;
        // Note: Argument::float_on_stack_offset_in_bytes_c is handled by CallArranger
        case 4: __ lfs(as_FloatRegister(to_reg), reg2offset(from_reg, in_stk_bias), callerSP); break;
        default: ShouldNotReachHere();
      }
      break;
    case StorageType::STACK:
      // STACK differs from FRAME_DATA only by the extra output bias;
      // intentional fallthrough into the shared copy path below.
      out_bias = out_stk_bias; // fallthrough
    case StorageType::FRAME_DATA: {
      // Copy via R0: load from the caller frame, store into the current frame.
      switch (from_reg.stack_size()) {
        case 8: __ ld( R0, reg2offset(from_reg, in_stk_bias), callerSP); break;
        case 4: __ lwa(R0, reg2offset(from_reg, in_stk_bias), callerSP); break;
        default: ShouldNotReachHere();
      }
      switch (to_reg.stack_size()) {
        case 8: __ std(R0, reg2offset(to_reg, out_bias), R1_SP); break;
        case 4: __ stw(R0, reg2offset(to_reg, out_bias), R1_SP); break;
        default: ShouldNotReachHere();
      }
    } break;
    default: ShouldNotReachHere();
  }
}
204 
205 void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMStorage tmp, int in_stk_bias, int out_stk_bias) const {
206   Register callerSP = as_Register(tmp); // preset
207   for (int i = 0; i < _moves.length(); i++) {
208     Move move = _moves.at(i);
209     VMStorage from_reg = move.from;
210     VMStorage to_reg   = move.to;
211 
212     switch (from_reg.type()) {
213       case StorageType::INTEGER:
214         move_reg64(masm, out_stk_bias, from_reg, to_reg);
215         break;
216       case StorageType::FLOAT:
217         move_float(masm, out_stk_bias, from_reg, to_reg);
218         break;
219       case StorageType::STACK:
220         move_stack(masm, callerSP, in_stk_bias, out_stk_bias, from_reg, to_reg);
221         break;
222       default: ShouldNotReachHere();
223     }
224   }
225 }