1 /*
2 * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2019, 2022, Arm Limited. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 */
24
25 #include "code/vmreg.inline.hpp"
26 #include "runtime/jniHandles.hpp"
27 #include "runtime/jniHandles.inline.hpp"
28 #include "oops/typeArrayOop.inline.hpp"
29 #include "oops/oopCast.inline.hpp"
30 #include "prims/foreignGlobals.hpp"
31 #include "prims/foreignGlobals.inline.hpp"
32 #include "prims/vmstorage.hpp"
33 #include "utilities/formatBuffer.hpp"
34
// The Java-side foreign linker is fully implemented for AArch64,
// so this platform unconditionally reports support.
bool ForeignGlobals::is_foreign_linker_supported() {
  return true;
}
38
39 bool ABIDescriptor::is_volatile_reg(Register reg) const {
40 return _integer_argument_registers.contains(reg)
41 || _integer_additional_volatile_registers.contains(reg);
42 }
43
44 bool ABIDescriptor::is_volatile_reg(FloatRegister reg) const {
45 return _vector_argument_registers.contains(reg)
46 || _vector_additional_volatile_registers.contains(reg);
47 }
48
// Translate the Java-side jdk.internal.foreign.abi.ABIDescriptor object
// (passed in as a JNI handle) into the VM-side ABIDescriptor struct.
// Each storage array is split by StorageType into integer (GP) and vector
// (FP/SIMD) register lists; stack alignment, shadow space, and the two
// scratch registers are copied over as scalar fields.
const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) {
  oop abi_oop = JNIHandles::resolve_non_null(jabi);
  ABIDescriptor abi;

  // Argument-passing registers (inputs to the foreign function).
  refArrayOop inputStorage = jdk_internal_foreign_abi_ABIDescriptor::inputStorage(abi_oop);
  parse_register_array(inputStorage, StorageType::INTEGER, abi._integer_argument_registers, as_Register);
  parse_register_array(inputStorage, StorageType::VECTOR, abi._vector_argument_registers, as_FloatRegister);

  // Return-value registers (outputs from the foreign function).
  refArrayOop outputStorage = jdk_internal_foreign_abi_ABIDescriptor::outputStorage(abi_oop);
  parse_register_array(outputStorage, StorageType::INTEGER, abi._integer_return_registers, as_Register);
  parse_register_array(outputStorage, StorageType::VECTOR, abi._vector_return_registers, as_FloatRegister);

  // Caller-saved registers beyond the argument registers.
  refArrayOop volatileStorage = jdk_internal_foreign_abi_ABIDescriptor::volatileStorage(abi_oop);
  parse_register_array(volatileStorage, StorageType::INTEGER, abi._integer_additional_volatile_registers, as_Register);
  parse_register_array(volatileStorage, StorageType::VECTOR, abi._vector_additional_volatile_registers, as_FloatRegister);

  abi._stack_alignment_bytes = jdk_internal_foreign_abi_ABIDescriptor::stackAlignment(abi_oop);
  abi._shadow_space_bytes = jdk_internal_foreign_abi_ABIDescriptor::shadowSpace(abi_oop);

  // Scratch registers the stub generator may clobber freely.
  abi._scratch1 = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::scratch1(abi_oop));
  abi._scratch2 = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::scratch2(abi_oop));

  return abi;
}
73
74 int RegSpiller::pd_reg_size(VMStorage reg) {
75 if (reg.type() == StorageType::INTEGER) {
76 return 8;
77 } else if (reg.type() == StorageType::VECTOR) {
78 return 16; // Always spill/unspill Q registers
79 }
80 return 0; // stack and BAD
81 }
82
83 void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMStorage reg) {
84 if (reg.type() == StorageType::INTEGER) {
85 masm->spill(as_Register(reg), true, offset);
86 } else if (reg.type() == StorageType::VECTOR) {
87 masm->spill(as_FloatRegister(reg), masm->Q, offset);
88 } else {
89 // stack and BAD
90 }
91 }
92
93 void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMStorage reg) {
94 if (reg.type() == StorageType::INTEGER) {
95 masm->unspill(as_Register(reg), true, offset);
96 } else if (reg.type() == StorageType::VECTOR) {
97 masm->unspill(as_FloatRegister(reg), masm->Q, offset);
98 } else {
99 // stack and BAD
100 }
101 }
102
// Byte offset from rfp to the incoming stack arguments: skips the two
// 8-byte slots holding the old rfp and lr.
static constexpr int RFP_BIAS = 16; // skip old rfp and lr
104
// Emit a move of a 64-bit GP register `from_reg` into `to_reg`, which may be
// another 64-bit GP register, an outgoing stack slot (biased by
// out_stk_bias), or a FRAME_DATA slot (unbiased). Stores to memory use the
// store width matching the destination slot's stack_size().
static void move_reg64(MacroAssembler* masm, int out_stk_bias,
                       Register from_reg, VMStorage to_reg) {
  int out_bias = 0;
  switch (to_reg.type()) {
    case StorageType::INTEGER:
      assert(to_reg.segment_mask() == REG64_MASK, "only moves to 64-bit registers supported");
      masm->mov(as_Register(to_reg), from_reg);
      break;
    case StorageType::STACK:
      out_bias = out_stk_bias; // only STACK destinations get the outgoing bias
      // fall through: STACK and FRAME_DATA share the store logic below
    case StorageType::FRAME_DATA: {
      Address dest(sp, to_reg.offset() + out_bias);
      switch (to_reg.stack_size()) {
        case 8: masm->str (from_reg, dest); break;
        case 4: masm->strw(from_reg, dest); break;
        case 2: masm->strh(from_reg, dest); break;
        case 1: masm->strb(from_reg, dest); break;
        default: ShouldNotReachHere();
      }
    } break;
    default: ShouldNotReachHere();
  }
}
128
// Emit a move from an incoming stack slot (addressed off rfp, past the saved
// rfp/lr pair via RFP_BIAS, plus in_stk_bias) into `to_reg`: a 64-bit GP
// register, a v128 vector register, an outgoing stack slot (biased by
// out_stk_bias), or a FRAME_DATA slot. Memory-to-memory moves bounce through
// `tmp_reg`. Load/store widths follow the slots' stack_size() values.
static void move_stack(MacroAssembler* masm, Register tmp_reg, int in_stk_bias, int out_stk_bias,
                       VMStorage from_reg, VMStorage to_reg) {
  Address from_addr(rfp, RFP_BIAS + from_reg.offset() + in_stk_bias);
  int out_bias = 0;
  switch (to_reg.type()) {
    case StorageType::INTEGER:
      assert(to_reg.segment_mask() == REG64_MASK, "only moves to 64-bit registers supported");
      switch (from_reg.stack_size()) {
        case 8: masm->ldr (as_Register(to_reg), from_addr); break;
        case 4: masm->ldrw(as_Register(to_reg), from_addr); break;
        case 2: masm->ldrh(as_Register(to_reg), from_addr); break;
        case 1: masm->ldrb(as_Register(to_reg), from_addr); break;
        default: ShouldNotReachHere();
      }
      break;
    case StorageType::VECTOR:
      assert(to_reg.segment_mask() == V128_MASK, "only moves to v128 registers supported");
      // Only 4- and 8-byte FP values come from the stack (float/double).
      switch (from_reg.stack_size()) {
        case 8:
          masm->ldrd(as_FloatRegister(to_reg), from_addr);
          break;
        case 4:
          masm->ldrs(as_FloatRegister(to_reg), from_addr);
          break;
        default: ShouldNotReachHere();
      }
      break;
    case StorageType::STACK:
      out_bias = out_stk_bias; // only STACK destinations get the outgoing bias
      // fall through: STACK and FRAME_DATA share the copy-via-tmp logic below
    case StorageType::FRAME_DATA: {
      // Memory-to-memory: load into tmp_reg, then store to the destination.
      switch (from_reg.stack_size()) {
        case 8: masm->ldr (tmp_reg, from_addr); break;
        case 4: masm->ldrw(tmp_reg, from_addr); break;
        case 2: masm->ldrh(tmp_reg, from_addr); break;
        case 1: masm->ldrb(tmp_reg, from_addr); break;
        default: ShouldNotReachHere();
      }
      Address dest(sp, to_reg.offset() + out_bias);
      switch (to_reg.stack_size()) {
        case 8: masm->str (tmp_reg, dest); break;
        case 4: masm->strw(tmp_reg, dest); break;
        case 2: masm->strh(tmp_reg, dest); break;
        case 1: masm->strb(tmp_reg, dest); break;
        default: ShouldNotReachHere();
      }
    } break;
    default: ShouldNotReachHere();
  }
}
178
// Emit a move from vector register `from_reg` into `to_reg`: a 64-bit GP
// register, another v128 vector register, or an outgoing stack slot (biased
// by out_stk_bias). Register-to-register moves use fmovd, i.e. only the low
// 64 bits are transferred — presumably sufficient because arguments passed
// this way are at most double-sized (TODO confirm against callers).
static void move_v128(MacroAssembler* masm, int out_stk_bias,
                      FloatRegister from_reg, VMStorage to_reg) {
  switch (to_reg.type()) {
    case StorageType::INTEGER:
      assert(to_reg.segment_mask() == REG64_MASK, "only moves to 64-bit registers supported");
      masm->fmovd(as_Register(to_reg), from_reg);
      break;
    case StorageType::VECTOR:
      assert(to_reg.segment_mask() == V128_MASK, "only moves to v128 registers supported");
      masm->fmovd(as_FloatRegister(to_reg), from_reg);
      break;
    case StorageType::STACK: {
      Address dest(sp, to_reg.offset() + out_stk_bias);
      // Only 4- and 8-byte FP values are stored to the stack (float/double).
      switch (to_reg.stack_size()) {
        case 8: masm->strd(from_reg, dest); break;
        case 4: masm->strs(from_reg, dest); break;
        default: ShouldNotReachHere();
      }
    } break;
    default: ShouldNotReachHere();
  }
}
201
// Emit the argument shuffle: for each recorded move, dispatch on the source
// storage kind to the matching emitter (move_reg64 / move_v128 / move_stack).
// `tmp` must be a GP register; it is only used as a bounce register for
// stack-to-stack moves. in_stk_bias/out_stk_bias adjust incoming (rfp-based)
// and outgoing (sp-based) stack offsets respectively.
void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMStorage tmp, int in_stk_bias, int out_stk_bias) const {
  Register tmp_reg = as_Register(tmp);
  for (int i = 0; i < _moves.length(); i++) {
    Move move = _moves.at(i);
    VMStorage from_reg = move.from;
    VMStorage to_reg = move.to;

    switch (from_reg.type()) {
      case StorageType::INTEGER:
        assert(from_reg.segment_mask() == REG64_MASK, "only 64-bit register supported");
        move_reg64(masm, out_stk_bias, as_Register(from_reg), to_reg);
        break;
      case StorageType::VECTOR:
        assert(from_reg.segment_mask() == V128_MASK, "only v128 register supported");
        move_v128(masm, out_stk_bias, as_FloatRegister(from_reg), to_reg);
        break;
      case StorageType::STACK:
        move_stack(masm, tmp_reg, in_stk_bias, out_stk_bias, from_reg, to_reg);
        break;
      default: ShouldNotReachHere();
    }
  }
}