1 /*
  2  * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "jvm.h"
 27 #include "asm/assembler.hpp"
 28 #include "asm/assembler.inline.hpp"
 29 #include "asm/macroAssembler.hpp"
 30 #include "oops/inlineKlass.inline.hpp"
 31 #include "runtime/signature_cc.hpp"
 32 #include "runtime/sharedRuntime.hpp"
 33 #ifdef COMPILER2
 34 #include "opto/compile.hpp"
 35 #include "opto/node.hpp"
 36 #endif
 37 
 38 void MacroAssembler::skip_unpacked_fields(const GrowableArray<SigEntry>* sig, int& sig_index, VMRegPair* regs_from, int regs_from_count, int& from_index) {
 39   ScalarizedInlineArgsStream stream(sig, sig_index, regs_from, regs_from_count, from_index);
 40   VMReg reg;
 41   BasicType bt;
 42   while (stream.next(reg, bt)) {}
 43   sig_index = stream.sig_index();
 44   from_index = stream.regs_index();
 45 }
 46 
 47 bool MacroAssembler::is_reg_in_unpacked_fields(const GrowableArray<SigEntry>* sig, int sig_index, VMReg to, VMRegPair* regs_from, int regs_from_count, int from_index) {
 48   ScalarizedInlineArgsStream stream(sig, sig_index, regs_from, regs_from_count, from_index);
 49   VMReg reg;
 50   BasicType bt;
 51   while (stream.next(reg, bt)) {
 52     if (reg == to) {
 53       return true;
 54     }
 55   }
 56   return false;
 57 }
 58 
 59 void MacroAssembler::mark_reg_writable(const VMRegPair* regs, int num_regs, int reg_index, MacroAssembler::RegState* reg_state) {
 60   assert(0 <= reg_index && reg_index < num_regs, "sanity");
 61   VMReg from_reg = regs[reg_index].first();
 62   if (from_reg->is_valid()) {
 63     assert(from_reg->is_stack(), "reserved entries must be stack");
 64     reg_state[from_reg->value()] = MacroAssembler::reg_writable;
 65   }
 66 }
 67 
 68 MacroAssembler::RegState* MacroAssembler::init_reg_state(VMRegPair* regs, int num_regs, int sp_inc, int max_stack) {
 69   int max_reg = VMRegImpl::stack2reg(max_stack)->value();
 70   MacroAssembler::RegState* reg_state = NEW_RESOURCE_ARRAY(MacroAssembler::RegState, max_reg);
 71 
 72   // Make all writable
 73   for (int i = 0; i < max_reg; ++i) {
 74     reg_state[i] = MacroAssembler::reg_writable;
 75   }
 76   // Set all source registers/stack slots to readonly to prevent accidental overwriting
 77   for (int i = 0; i < num_regs; ++i) {
 78     VMReg reg = regs[i].first();
 79     if (!reg->is_valid()) continue;
 80     if (reg->is_stack()) {
 81       // Update source stack location by adding stack increment
 82       reg = VMRegImpl::stack2reg(reg->reg2stack() + sp_inc/VMRegImpl::stack_slot_size);
 83       regs[i] = reg;
 84     }
 85     assert(reg->value() >= 0 && reg->value() < max_reg, "reg value out of bounds");
 86     reg_state[reg->value()] = MacroAssembler::reg_readonly;
 87   }
 88   return reg_state;
 89 }
 90 
 91 #ifdef COMPILER2
 92 int MacroAssembler::unpack_inline_args(Compile* C, bool receiver_only) {
 93   assert(C->has_scalarized_args(), "inline type argument scalarization is disabled");
 94   Method* method = C->method()->get_Method();
 95   const GrowableArray<SigEntry>* sig = method->adapter()->get_sig_cc();
 96   assert(sig != nullptr, "must have scalarized signature");
 97 
 98   // Get unscalarized calling convention
 99   BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, 256);
100   int args_passed = 0;
101   if (!method->is_static()) {
102     sig_bt[args_passed++] = T_OBJECT;
103   }
104   if (!receiver_only) {
105     for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
106       BasicType bt = ss.type();
107       sig_bt[args_passed++] = bt;
108       if (type2size[bt] == 2) {
109         sig_bt[args_passed++] = T_VOID;
110       }
111     }
112   } else {
113     // Only unpack the receiver, all other arguments are already scalarized
114     InstanceKlass* holder = method->method_holder();
115     int rec_len = (holder->is_inline_klass() && method->is_scalarized_arg(0)) ? InlineKlass::cast(holder)->extended_sig()->length() : 1;
116     // Copy scalarized signature but skip receiver and inline type delimiters
117     for (int i = 0; i < sig->length(); i++) {
118       if (SigEntry::skip_value_delimiters(sig, i) && rec_len <= 0) {
119         sig_bt[args_passed++] = sig->at(i)._bt;
120       }
121       rec_len--;
122     }
123   }
124   VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, args_passed);
125   int args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, args_passed);
126 
127   // Get scalarized calling convention
128   int args_passed_cc = SigEntry::fill_sig_bt(sig, sig_bt);
129   VMRegPair* regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, sig->length());
130   int args_on_stack_cc = SharedRuntime::java_calling_convention(sig_bt, regs_cc, args_passed_cc);
131 
132   // Check if we need to extend the stack for unpacking
133   int sp_inc = 0;
134   if (args_on_stack_cc > args_on_stack) {
135     sp_inc = extend_stack_for_inline_args(args_on_stack_cc);
136   }
137   shuffle_inline_args(false, receiver_only, sig,
138                       args_passed, args_on_stack, regs,           // from
139                       args_passed_cc, args_on_stack_cc, regs_cc,  // to
140                       sp_inc, noreg);
141   return sp_inc;
142 }
143 #endif // COMPILER2
144 
// Emit code that moves argument values from the 'regs' locations to the
// 'regs_to' locations, packing inline type fields into buffered objects
// (is_packing) or unpacking buffered objects into their fields (!is_packing).
// Moves are attempted repeatedly; once a full round has been tried without
// progress, spilling is used to break circular register dependencies.
// 'sp_inc' is the stack extension (in bytes) already applied; 'val_array'
// holds the array of buffered values when packing (must be noreg otherwise,
// see assert below). Fails with guarantee() if dependencies cannot be resolved.
void MacroAssembler::shuffle_inline_args(bool is_packing, bool receiver_only,
                                         const GrowableArray<SigEntry>* sig,
                                         int args_passed, int args_on_stack, VMRegPair* regs,
                                         int args_passed_to, int args_on_stack_to, VMRegPair* regs_to,
                                         int sp_inc, Register val_array) {
  // Size the state table for the larger of the two stack layouts
  int max_stack = MAX2(args_on_stack + sp_inc/VMRegImpl::stack_slot_size, args_on_stack_to);
  RegState* reg_state = init_reg_state(regs, args_passed, sp_inc, max_stack);

  // Emit code for packing/unpacking inline type arguments
  // We try multiple times and eventually start spilling to resolve (circular) dependencies
  bool done = (args_passed_to == 0);
  for (int i = 0; i < 2*args_passed_to && !done; ++i) {
    done = true;
    bool spill = (i > args_passed_to); // Start spilling?
    // Iterate over all arguments (when unpacking, do in reverse)
    int step = is_packing ? 1 : -1;
    int from_index    = is_packing ? 0 : args_passed      - 1;
    int to_index      = is_packing ? 0 : args_passed_to   - 1;
    int sig_index     = is_packing ? 0 : sig->length()    - 1;
    int sig_index_end = is_packing ? sig->length() : -1;
    int vtarg_index = 0;
    for (; sig_index != sig_index_end; sig_index += step) {
      assert(0 <= sig_index && sig_index < sig->length(), "index out of bounds");
      if (spill) {
        // This call returns true IFF we should keep trying to spill in this round.
        spill = shuffle_inline_args_spill(is_packing, sig, sig_index, regs, from_index, args_passed,
                                          reg_state);
      }
      BasicType bt = sig->at(sig_index)._bt;
      if (SigEntry::skip_value_delimiters(sig, sig_index)) {
        // Plain (non-inline-type) argument: a simple register/stack move
        VMReg from_reg = regs[from_index].first();
        if (from_reg->is_valid()) {
          // move_helper returns false if the destination is still read-only;
          // that keeps 'done' false and forces another round
          done &= move_helper(from_reg, regs_to[to_index].first(), bt, reg_state);
        } else {
          // halves of T_LONG or T_DOUBLE
          assert(bt == T_VOID, "unexpected basic type");
        }
        to_index += step;
        from_index += step;
      } else if (is_packing) {
        // Gather the scalarized fields into a buffered inline type object
        assert(val_array != noreg, "must be");
        VMReg reg_to = regs_to[to_index].first();
        done &= pack_inline_helper(sig, sig_index, vtarg_index,
                                   regs, args_passed, from_index, reg_to,
                                   reg_state, val_array);
        vtarg_index++;
        to_index++;
      } else if (!receiver_only || (from_index == 0 && bt == T_VOID)) {
        // Scatter a buffered inline type object into its scalarized fields.
        // When receiver_only, only the receiver (from_index == 0, marked by
        // the T_VOID closing delimiter) is unpacked.
        VMReg from_reg = regs[from_index].first();
        done &= unpack_inline_helper(sig, sig_index,
                                     from_reg, from_index, regs_to, args_passed_to, to_index,
                                     reg_state);
        if (from_index == -1 && sig_index != 0) {
          // This can happen when we are confusing an empty inline type argument which is
          // not counted in the scalarized signature for the receiver. Just ignore it.
          assert(receiver_only, "sanity");
          from_index = 0;
        }
      }
    }
  }
  guarantee(done, "Could not resolve circular dependency when shuffling inline type arguments");
}
208 
// Attempt to break a circular move dependency for the argument at
// sig_index/from_index by spilling its (read-only) source location to a free
// spill register. Returns true if nothing needed spilling here and the caller
// should keep trying to spill further arguments in this round; returns false
// once a spill has been performed (or was already pending from a previous
// round), so the caller stops spilling for the rest of the round.
bool MacroAssembler::shuffle_inline_args_spill(bool is_packing, const GrowableArray<SigEntry>* sig, int sig_index,
                                               VMRegPair* regs_from, int from_index, int regs_from_count, RegState* reg_state) {
  VMReg reg;
  if (!is_packing || SigEntry::skip_value_delimiters(sig, sig_index)) {
    // Single source location (plain argument, or anything when unpacking)
    reg = regs_from[from_index].first();
    if (!reg->is_valid() || reg_state[reg->value()] != reg_readonly) {
      // Spilling this won't break cycles
      return true;
    }
  } else {
    // Packing an inline type: scan its scalarized field sources for one that
    // is still read-only (i.e. part of a potential cycle)
    ScalarizedInlineArgsStream stream(sig, sig_index, regs_from, regs_from_count, from_index);
    VMReg from_reg;
    BasicType bt;
    bool found = false;
    while (stream.next(from_reg, bt)) {
      reg = from_reg;
      assert(from_reg->is_valid(), "must be");
      if (reg_state[from_reg->value()] == reg_readonly) {
        found = true;
        break;
      }
    }
    if (!found) {
      // Spilling fields in this inline type arg won't break cycles
      return true;
    }
  }

  // Spill argument to be able to write the source and resolve circular dependencies
  VMReg spill_reg = spill_reg_for(reg);
  if (reg_state[spill_reg->value()] == reg_readonly) {
    // We have already spilled (in previous round). The spilled register should be consumed by this round.
  } else {
    // T_DOUBLE is used so the full width of the source is preserved
    bool res = move_helper(reg, spill_reg, T_DOUBLE, reg_state);
    assert(res, "Spilling should not fail");
    // Set spill_reg as new source and update state
    reg = spill_reg;
    regs_from[from_index].set1(reg);
    reg_state[reg->value()] = reg_readonly;
  }

  return false; // Do not spill again in this round
}