1 /*
  2  * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  */
 23 
 24 #include "classfile/javaClasses.hpp"
 25 #include "runtime/jniHandles.inline.hpp"
 26 #include "oops/typeArrayOop.inline.hpp"
 27 #include "oops/oopCast.inline.hpp"
 28 #include "prims/foreignGlobals.inline.hpp"
 29 #include "runtime/sharedRuntime.hpp"
 30 #include "utilities/formatBuffer.hpp"
 31 
 32 bool ForeignGlobals::is_foreign_linker_supported() {
 33   return true;
 34 }
 35 
 36 bool ABIDescriptor::is_volatile_reg(Register reg) const {
 37     return _integer_argument_registers.contains(reg)
 38         || _integer_additional_volatile_registers.contains(reg);
 39 }
 40 
 41 bool ABIDescriptor::is_volatile_reg(XMMRegister reg) const {
 42     return _vector_argument_registers.contains(reg)
 43         || _vector_additional_volatile_registers.contains(reg);
 44 }
 45 
// Build the native-side ABIDescriptor from the Java-side
// jdk.internal.foreign.abi.ABIDescriptor object referenced by 'jabi'.
// Each storage array on the Java object is indexed by StorageType; the
// per-type sub-arrays are decoded into the corresponding register lists.
const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) {
  oop abi_oop = JNIHandles::resolve_non_null(jabi);
  ABIDescriptor abi;

  // Argument-passing registers (integer and vector).
  objArrayOop inputStorage = jdk_internal_foreign_abi_ABIDescriptor::inputStorage(abi_oop);
  parse_register_array(inputStorage, StorageType::INTEGER, abi._integer_argument_registers, as_Register);
  parse_register_array(inputStorage, StorageType::VECTOR, abi._vector_argument_registers, as_XMMRegister);

  // Return-value registers. For X87 only the count is needed, not the
  // individual registers, so just record the sub-array's length.
  objArrayOop outputStorage = jdk_internal_foreign_abi_ABIDescriptor::outputStorage(abi_oop);
  parse_register_array(outputStorage, StorageType::INTEGER, abi._integer_return_registers, as_Register);
  parse_register_array(outputStorage, StorageType::VECTOR, abi._vector_return_registers, as_XMMRegister);
  objArrayOop subarray = oop_cast<objArrayOop>(outputStorage->obj_at((int) StorageType::X87));
  abi._X87_return_registers_noof = subarray->length();

  // Registers that are volatile (caller-saved) but not argument registers.
  objArrayOop volatileStorage = jdk_internal_foreign_abi_ABIDescriptor::volatileStorage(abi_oop);
  parse_register_array(volatileStorage, StorageType::INTEGER, abi._integer_additional_volatile_registers, as_Register);
  parse_register_array(volatileStorage, StorageType::VECTOR, abi._vector_additional_volatile_registers, as_XMMRegister);

  abi._stack_alignment_bytes = jdk_internal_foreign_abi_ABIDescriptor::stackAlignment(abi_oop);
  abi._shadow_space_bytes = jdk_internal_foreign_abi_ABIDescriptor::shadowSpace(abi_oop);

  // Scratch registers the stub generators may clobber freely.
  abi._scratch1 = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::scratch1(abi_oop));
  abi._scratch2 = parse_vmstorage(jdk_internal_foreign_abi_ABIDescriptor::scratch2(abi_oop));

  return abi;
}
 72 
 73 int RegSpiller::pd_reg_size(VMStorage reg) {
 74   if (reg.type() == StorageType::INTEGER) {
 75     return 8;
 76   } else if (reg.type() == StorageType::VECTOR) {
 77     return 16;
 78   }
 79   return 0; // stack and BAD
 80 }
 81 
 82 void RegSpiller::pd_store_reg(MacroAssembler* masm, int offset, VMStorage reg) {
 83   if (reg.type() == StorageType::INTEGER) {
 84     masm->movptr(Address(rsp, offset), as_Register(reg));
 85   } else if (reg.type() == StorageType::VECTOR) {
 86     masm->movdqu(Address(rsp, offset), as_XMMRegister(reg));
 87   } else {
 88     // stack and BAD
 89   }
 90 }
 91 
 92 void RegSpiller::pd_load_reg(MacroAssembler* masm, int offset, VMStorage reg) {
 93   if (reg.type() == StorageType::INTEGER) {
 94     masm->movptr(as_Register(reg), Address(rsp, offset));
 95   } else if (reg.type() == StorageType::VECTOR) {
 96     masm->movdqu(as_XMMRegister(reg), Address(rsp, offset));
 97   } else {
 98     // stack and BAD
 99   }
100 }
101 
static constexpr int RBP_BIAS = 16; // rbp-relative offset to incoming stack args: skips the saved rbp (8) and the return address (8)
103 
// Emit a move of the 64-bit GP register 'from_reg' to 'to_reg', which may be
// another GP register, an outgoing stack slot, or a frame-data slot.
// 'out_stk_bias' is added to the offset only for STACK destinations.
static void move_reg64(MacroAssembler* masm, int out_stk_bias,
                       Register from_reg, VMStorage to_reg) {
  int out_bias = 0;
  switch (to_reg.type()) {
    case StorageType::INTEGER:
      assert(to_reg.segment_mask() == REG64_MASK, "only moves to 64-bit registers supported");
      masm->movq(as_Register(to_reg), from_reg);
      break;
    case StorageType::STACK:
      out_bias = out_stk_bias;
      // fall through: STACK shares the store below, but with the extra bias
    case StorageType::FRAME_DATA:
      assert(to_reg.stack_size() == 8, "only moves with 64-bit targets supported");
      masm->movq(Address(rsp, to_reg.offset() + out_bias), from_reg);
      break;
    default: ShouldNotReachHere();
  }
}
121 
// Emit a move of a 64-bit stack value at 'from_address' to 'to_reg': a GP
// register, an XMM register, an outgoing stack slot, or a frame-data slot.
// Memory-to-memory moves bounce through 'tmp_reg'. 'out_stk_bias' is added
// to the offset only for STACK destinations.
static void move_stack64(MacroAssembler* masm, Register tmp_reg, int out_stk_bias,
                         Address from_address, VMStorage to_reg) {
  int out_bias = 0;
  switch (to_reg.type()) {
    case StorageType::INTEGER:
      assert(to_reg.segment_mask() == REG64_MASK, "only moves to 64-bit registers supported");
      masm->movq(as_Register(to_reg), from_address);
      break;
    case StorageType::VECTOR:
      assert(to_reg.segment_mask() == XMM_MASK, "only moves to xmm registers supported");
      masm->movdqu(as_XMMRegister(to_reg), from_address);
      break;
    case StorageType::STACK:
      out_bias = out_stk_bias;
      // fall through: STACK shares the memory-to-memory copy below, but with the extra bias
    case StorageType::FRAME_DATA:
      assert(to_reg.stack_size() == 8, "only moves with 64-bit targets supported");
      masm->movq(tmp_reg, from_address);
      masm->movq(Address(rsp, to_reg.offset() + out_bias), tmp_reg);
      break;
    default: ShouldNotReachHere();
  }
}
144 
145 static void move_xmm(MacroAssembler* masm, int out_stk_bias,
146                      XMMRegister from_reg, VMStorage to_reg) {
147   switch (to_reg.type()) {
148     case StorageType::INTEGER: // windows vargarg floats
149       assert(to_reg.segment_mask() == REG64_MASK, "only moves to 64-bit registers supported");
150       masm->movq(as_Register(to_reg), from_reg);
151       break;
152     case StorageType::VECTOR:
153       assert(to_reg.segment_mask() == XMM_MASK, "only moves to xmm registers supported");
154       masm->movdqu(as_XMMRegister(to_reg), from_reg);
155       break;
156     case StorageType::STACK:
157       assert(to_reg.stack_size() == 8, "only moves with 64-bit targets supported");
158       masm->movq(Address(rsp, to_reg.offset() + out_stk_bias), from_reg);
159       break;
160     default: ShouldNotReachHere();
161   }
162 }
163 
// Emit code for every recorded argument move, dispatching on the source
// kind. Incoming stack args are read rbp-relative (biased by RBP_BIAS plus
// 'in_stk_bias'); outgoing stack slots are written rsp-relative (biased by
// 'out_stk_bias'). 'tmp' must be a GP register; it is used as a bounce
// register for memory-to-memory moves.
void ArgumentShuffle::pd_generate(MacroAssembler* masm, VMStorage tmp, int in_stk_bias, int out_stk_bias) const {
  Register tmp_reg = as_Register(tmp);
  for (int i = 0; i < _moves.length(); i++) {
    Move move = _moves.at(i);
    VMStorage from_reg = move.from;
    VMStorage to_reg   = move.to;

    switch (from_reg.type()) {
      case StorageType::INTEGER:
        assert(from_reg.segment_mask() == REG64_MASK, "only 64-bit register supported");
        move_reg64(masm, out_stk_bias, as_Register(from_reg), to_reg);
        break;
      case StorageType::VECTOR:
        assert(from_reg.segment_mask() == XMM_MASK, "only xmm register supported");
        move_xmm(masm, out_stk_bias, as_XMMRegister(from_reg), to_reg);
        break;
      case StorageType::STACK: {
        assert(from_reg.stack_size() == 8, "only stack_size 8 supported");
        // Source is an incoming stack argument in the caller's frame.
        Address from_addr(rbp, RBP_BIAS + from_reg.offset() + in_stk_bias);
        move_stack64(masm, tmp_reg, out_stk_bias, from_addr, to_reg);
      } break;
      default: ShouldNotReachHere();
    }
  }
}