/*
 * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "jvm.h"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "oops/inlineKlass.inline.hpp"
#include "runtime/signature_cc.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER2
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

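// Advance 'sig_index' and 'from_index' past all scalarized (unpacked) fields of
// the inline type argument starting at the current signature position.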
void MacroAssembler::skip_unpacked_fields(const GrowableArray<SigEntry>* sig, int& sig_index, VMRegPair* regs_from, int regs_from_count, int& from_index) {
  ScalarizedInlineArgsStream stream(sig, sig_index, regs_from, regs_from_count, from_index);
  VMReg reg;
  BasicType bt;
  while (stream.next(reg, bt)) {}
  sig_index = stream.sig_index();
  from_index = stream.regs_index();
}

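// Return true if register 'to' is used to pass one of the scalarized (unpacked)
// fields of the inline type argument starting at the current signature position.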
bool MacroAssembler::is_reg_in_unpacked_fields(const GrowableArray<SigEntry>* sig, int sig_index, VMReg to, VMRegPair* regs_from, int regs_from_count, int from_index) {
  ScalarizedInlineArgsStream stream(sig, sig_index, regs_from, regs_from_count, from_index);
  VMReg reg;
  BasicType bt;
  while (stream.next(reg, bt)) {
    if (reg == to) {
      return true;
    }
  }
  return false;
}

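// Mark the stack slot of the reserved entry at 'reg_index' as writable again so
// that it can be used as a destination when shuffling arguments.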
void MacroAssembler::mark_reg_writable(const VMRegPair* regs, int num_regs, int reg_index, MacroAssembler::RegState* reg_state) {
  assert(0 <= reg_index && reg_index < num_regs, "sanity");
  VMReg from_reg = regs[reg_index].first();
  if (from_reg->is_valid()) {
    assert(from_reg->is_stack(), "reserved entries must be stack");
    reg_state[from_reg->value()] = MacroAssembler::reg_writable;
  }
}

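// Build the register/stack-slot state used when shuffling inline type arguments:
// all locations start out writable, then every valid source location is marked
// read-only so it is not overwritten before its value has been moved. Source
// stack slots are rebased by 'sp_inc' to account for the extended stack frame.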
MacroAssembler::RegState* MacroAssembler::init_reg_state(VMRegPair* regs, int num_regs, int sp_inc, int max_stack) {
  int max_reg = VMRegImpl::stack2reg(max_stack)->value();
  MacroAssembler::RegState* reg_state = NEW_RESOURCE_ARRAY(MacroAssembler::RegState, max_reg);

  // Make all writable
  for (int i = 0; i < max_reg; ++i) {
    reg_state[i] = MacroAssembler::reg_writable;
  }
  // Set all source registers/stack slots to readonly to prevent accidental overwriting
  for (int i = 0; i < num_regs; ++i) {
    VMReg reg = regs[i].first();
    if (!reg->is_valid()) continue;
    if (reg->is_stack()) {
      // Update source stack location by adding stack increment
      reg = VMRegImpl::stack2reg(reg->reg2stack() + sp_inc/VMRegImpl::stack_slot_size);
      regs[i] = reg;
    }
    assert(reg->value() >= 0 && reg->value() < max_reg, "reg value out of bounds");
    reg_state[reg->value()] = MacroAssembler::reg_readonly;
  }
  return reg_state;
}

#ifdef COMPILER2
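// Compute the unscalarized and scalarized calling conventions of the method being
// compiled, extend the stack if the scalarized convention needs more stack slots,
// and emit code that unpacks the incoming inline type arguments into the registers
// and stack slots of their fields. With 'receiver_only' set, only the receiver is
// unpacked. Returns the stack increment.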
int MacroAssembler::unpack_inline_args(Compile* C, bool receiver_only) {
  assert(C->has_scalarized_args(), "inline type argument scalarization is disabled");
  Method* method = C->method()->get_Method();
  const GrowableArray<SigEntry>* sig = method->adapter()->get_sig_cc();
  assert(sig != nullptr, "must have scalarized signature");

  // Get unscalarized calling convention
  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sig->length());
  int args_passed = 0;
  if (!method->is_static()) {
    sig_bt[args_passed++] = T_OBJECT;
  }
  if (!receiver_only) {
    for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
      BasicType bt = ss.type();
      sig_bt[args_passed++] = bt;
      if (type2size[bt] == 2) {
        sig_bt[args_passed++] = T_VOID;
      }
    }
  } else {
    // Only unpack the receiver, all other arguments are already scalarized
    InstanceKlass* holder = method->method_holder();
    int rec_len = (holder->is_inline_klass() && method->is_scalarized_arg(0)) ? InlineKlass::cast(holder)->extended_sig()->length() : 1;
    // Copy scalarized signature but skip receiver and inline type delimiters
    for (int i = 0; i < sig->length(); i++) {
      if (SigEntry::skip_value_delimiters(sig, i) && rec_len <= 0) {
        sig_bt[args_passed++] = sig->at(i)._bt;
      }
      rec_len--;
    }
  }
  VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, args_passed);
  int args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, args_passed);

  // Get scalarized calling convention
  int args_passed_cc = SigEntry::fill_sig_bt(sig, sig_bt);
  VMRegPair* regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, sig->length());
  int args_on_stack_cc = SharedRuntime::java_calling_convention(sig_bt, regs_cc, args_passed_cc);

  // Check if we need to extend the stack for unpacking
  int sp_inc = 0;
  if (args_on_stack_cc > args_on_stack) {
    sp_inc = extend_stack_for_inline_args(args_on_stack_cc);
  }
  shuffle_inline_args(false, receiver_only, sig,
                      args_passed, args_on_stack, regs,           // from
                      args_passed_cc, args_on_stack_cc, regs_cc,  // to
                      sp_inc, noreg);
  return sp_inc;
}
#endif // COMPILER2

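// Emit code that shuffles the incoming arguments from the 'regs' locations to the
// 'regs_to' locations, packing ('is_packing') or unpacking inline type arguments
// along the way. Multiple passes are made and, if necessary, read-only sources are
// spilled to break circular dependencies between the moves.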
void MacroAssembler::shuffle_inline_args(bool is_packing, bool receiver_only,
                                         const GrowableArray<SigEntry>* sig,
                                         int args_passed, int args_on_stack, VMRegPair* regs,
                                         int args_passed_to, int args_on_stack_to, VMRegPair* regs_to,
                                         int sp_inc, Register val_array) {
  int max_stack = MAX2(args_on_stack + sp_inc/VMRegImpl::stack_slot_size, args_on_stack_to);
  RegState* reg_state = init_reg_state(regs, args_passed, sp_inc, max_stack);

  // Emit code for packing/unpacking inline type arguments
  // We try multiple times and eventually start spilling to resolve (circular) dependencies
  bool done = (args_passed_to == 0);
  for (int i = 0; i < 2*args_passed_to && !done; ++i) {
    done = true;
    bool spill = (i > args_passed_to); // Start spilling?
    // Iterate over all arguments (when unpacking, do in reverse)
    int step = is_packing ? 1 : -1;
    int from_index    = is_packing ? 0 : args_passed      - 1;
    int to_index      = is_packing ? 0 : args_passed_to   - 1;
    int sig_index     = is_packing ? 0 : sig->length()    - 1;
    int sig_index_end = is_packing ? sig->length() : -1;
    int vtarg_index = 0;
    for (; sig_index != sig_index_end; sig_index += step) {
      assert(0 <= sig_index && sig_index < sig->length(), "index out of bounds");
      if (spill) {
        // This call returns true IFF we should keep trying to spill in this round.
        spill = shuffle_inline_args_spill(is_packing, sig, sig_index, regs, from_index, args_passed,
                                          reg_state);
      }
      BasicType bt = sig->at(sig_index)._bt;
      if (SigEntry::skip_value_delimiters(sig, sig_index)) {
        VMReg from_reg = regs[from_index].first();
        if (from_reg->is_valid()) {
          done &= move_helper(from_reg, regs_to[to_index].first(), bt, reg_state);
        } else {
          // halves of T_LONG or T_DOUBLE
          assert(bt == T_VOID, "unexpected basic type");
        }
        to_index += step;
        from_index += step;
      } else if (is_packing) {
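        // Pack the scalarized field values of this inline type argument into its buffered representation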
        assert(val_array != noreg, "must be");
        VMReg reg_to = regs_to[to_index].first();
        done &= pack_inline_helper(sig, sig_index, vtarg_index,
                                   regs, args_passed, from_index, reg_to,
                                   reg_state, val_array);
        vtarg_index++;
        to_index++;
      } else if (!receiver_only || (from_index == 0 && bt == T_VOID)) {
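        // Unpack the inline type argument into the registers/stack slots of its fields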
        VMReg from_reg = regs[from_index].first();
        done &= unpack_inline_helper(sig, sig_index,
                                     from_reg, from_index, regs_to, args_passed_to, to_index,
                                     reg_state);
        if (from_index == -1 && sig_index != 0) {
          // This can happen if an empty inline type argument, which is not counted in the
          // scalarized signature, is mistaken for the receiver. Just ignore it.
          assert(receiver_only, "sanity");
          from_index = 0;
        }
      }
    }
  }
  guarantee(done, "Could not resolve circular dependency when shuffling inline type arguments");
}

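// Spill the source of the current argument if it is still marked read-only, so
// that circular dependencies between argument moves can be broken. Returns true
// if spilling this argument would not help and the caller should keep trying,
// false once a spill has been emitted (or had already been emitted earlier).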
bool MacroAssembler::shuffle_inline_args_spill(bool is_packing, const GrowableArray<SigEntry>* sig, int sig_index,
                                               VMRegPair* regs_from, int from_index, int regs_from_count, RegState* reg_state) {
  VMReg reg;
  if (!is_packing || SigEntry::skip_value_delimiters(sig, sig_index)) {
    reg = regs_from[from_index].first();
    if (!reg->is_valid() || reg_state[reg->value()] != reg_readonly) {
      // Spilling this won't break cycles
      return true;
    }
  } else {
    ScalarizedInlineArgsStream stream(sig, sig_index, regs_from, regs_from_count, from_index);
    VMReg from_reg;
    BasicType bt;
    bool found = false;
    while (stream.next(from_reg, bt)) {
      reg = from_reg;
      assert(from_reg->is_valid(), "must be");
      if (reg_state[from_reg->value()] == reg_readonly) {
        found = true;
        break;
      }
    }
    if (!found) {
      // Spilling fields in this inline type arg won't break cycles
      return true;
    }
  }

  // Spill argument to be able to write the source and resolve circular dependencies
  VMReg spill_reg = spill_reg_for(reg);
  if (reg_state[spill_reg->value()] == reg_readonly) {
    // We have already spilled (in previous round). The spilled register should be consumed by this round.
  } else {
    bool res = move_helper(reg, spill_reg, T_DOUBLE, reg_state);
    assert(res, "Spilling should not fail");
    // Set spill_reg as new source and update state
    reg = spill_reg;
    regs_from[from_index].set1(reg);
    reg_state[reg->value()] = reg_readonly;
  }

  return false; // Do not spill again in this round
}