/*
 * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/convertnode.hpp"
#include "opto/locknode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/node.hpp"
#include "opto/parse.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"
#include "runtime/stubRoutines.hpp"

//--------------------gen_stub-------------------------------
void GraphKit::gen_stub(address C_function,
                        const char *name,
                        int is_fancy_jump,
                        bool pass_tls,
                        bool return_pc) {
  ResourceMark rm;

  const TypeTuple *jdomain = C->tf()->domain_sig();
  const TypeTuple *jrange  = C->tf()->range_sig();

  // The procedure start
  StartNode* start = new StartNode(root(), jdomain);
  _gvn.set_type_bottom(start);

  // Make a map, with JVM state
  uint parm_cnt = jdomain->cnt();
  uint max_map = MAX2(2*parm_cnt+1, jrange->cnt());
  // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
  assert(SynchronizationEntryBCI == InvocationEntryBci, "");
  JVMState* jvms = new (C) JVMState(0);
  jvms->set_bci(InvocationEntryBci);
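  // A stub has no monitors and no scalar-replaced objects, so the monitor,
  // scalar-object and end offsets all collapse to the end of the map.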
  jvms->set_monoff(max_map);
  jvms->set_scloff(max_map);
  jvms->set_endoff(max_map);
  {
    SafePointNode *map = new SafePointNode( max_map, jvms );
    jvms->set_map(map);
    set_jvms(jvms);
    assert(map == this->map(), "kit.map is set");
  }

  // Make up the parameters
  uint i;
  for (i = 0; i < parm_cnt; i++) {
    map()->init_req(i, _gvn.transform(new ParmNode(start, i)));
  }
  for ( ; i<map()->req(); i++) {
    map()->init_req(i, top());      // For nicer debugging
  }

  // GraphKit requires memory to be a MergeMemNode:
  set_all_memory(map()->memory());

  // Get base of thread-local storage area
  Node* thread = _gvn.transform(new ThreadLocalNode());
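  // The thread pointer is used below to update the last_Java_frame anchor,
  // is passed to the C function as an extra argument, and is read again
  // after the call to pick up vm_result and any pending exception.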

  const int NoAlias = Compile::AliasIdxBot;

  Node* adr_last_Java_pc = basic_plus_adr(top(),
                                          thread,
                                          in_bytes(JavaThread::frame_anchor_offset()) +
                                          in_bytes(JavaFrameAnchor::last_Java_pc_offset()));

  // Drop in the last_Java_sp.  last_Java_fp is not touched.
  // Always do this after the other "last_Java_frame" fields are set, since
  // as soon as last_Java_sp != nullptr, has_last_Java_frame becomes true and
  // users will look at the other fields.
  //
  Node *adr_sp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_sp_offset()));
  Node *last_sp = frameptr();
  store_to_memory(control(), adr_sp, last_sp, T_ADDRESS, NoAlias, MemNode::unordered);

  // Set _thread_in_native
  // The order of stores into TLS is critical!  Setting _thread_in_native MUST
  // be last, because a GC is allowed at any time after setting it and the GC
  // will require last_Java_pc and last_Java_sp.

  //-----------------------------
  // Compute signature for C call.  Varies from the Java signature!

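  // Size the field array for a generous worst case: every parameter may be
  // widened to a long (two slots), plus one slot for the thread pointer and
  // one for the optional return PC.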
  const Type **fields = TypeTuple::fields(2*parm_cnt+2);
  uint cnt = TypeFunc::Parms;
  // The C routine gets the base of thread-local storage passed in as an
  // extra argument. Not all calls need it, but it is cheap to add here.
  for (uint pcnt = cnt; pcnt < parm_cnt; pcnt++, cnt++) {
    const Type *f = jdomain->field_at(pcnt);
    if (CCallingConventionRequiresIntsAsLongs && f->isa_int()) {
      fields[cnt++] = TypeLong::LONG;
      fields[cnt] = Type::HALF; // Must add an additional half for a long.
    } else {
      fields[cnt] = f;
    }
  }
  fields[cnt++] = TypeRawPtr::BOTTOM; // Thread-local storage
  // Also pass in the caller's PC, if asked for.
  if (return_pc) {
    fields[cnt++] = TypeRawPtr::BOTTOM; // Return PC
  }
  const TypeTuple* domain = TypeTuple::make(cnt, fields);

  // The C routine we are about to call cannot return an oop; it can block on
  // exit and a GC will trash the oop while it sits in C-land.  Instead, we
  // return the oop through TLS for runtime calls.
  // Also, C routines returning integer subword values leave the high
  // order bits dirty; these must be cleaned up by explicit sign extension.
  const Type* retval = (jrange->cnt() == TypeFunc::Parms) ? Type::TOP : jrange->field_at(TypeFunc::Parms);
  // Make a private copy of jrange->fields();
  const Type **rfields = TypeTuple::fields(jrange->cnt() - TypeFunc::Parms);
  // Fixup oop returns
  bool retval_ptr = retval->isa_oop_ptr() != nullptr;
  if (retval_ptr) {
    assert( pass_tls, "Oop must be returned thru TLS" );
    // Fancy-jumps return address; others return void
    rfields[TypeFunc::Parms] = is_fancy_jump ? TypeRawPtr::BOTTOM : Type::TOP;

  } else if (retval->isa_int()) { // Returning any integer subtype?
    // "Fatten" byte, char & short return types to 'int' to show that
    // the native C code can return values with junk high order bits.
    // We'll sign-extend it below.
    rfields[TypeFunc::Parms] = TypeInt::INT; // It's "dirty" and needs sign-ext

  } else if (jrange->cnt() >= TypeFunc::Parms+1) { // Else copy other types
    rfields[TypeFunc::Parms] = jrange->field_at(TypeFunc::Parms);
    if (jrange->cnt() == TypeFunc::Parms+2) {
      rfields[TypeFunc::Parms+1] = jrange->field_at(TypeFunc::Parms+1);
    }
  }
  const TypeTuple* range = TypeTuple::make(jrange->cnt(), rfields);

  // Final C signature
  const TypeFunc *c_sig = TypeFunc::make(domain, range);

  //-----------------------------
  // Make the call node.
  CallRuntimeNode* call = new CallRuntimeNode(c_sig, C_function, name, TypePtr::BOTTOM, new (C) JVMState(0));
  //-----------------------------

  // Fix-up the debug info for the call.
  call->jvms()->set_bci(0);
  call->jvms()->set_offsets(cnt);
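  // No Java-level debug state (locals, expression stack, monitors) is
  // recorded for the runtime call; all JVMState offsets point just past the
  // call's fixed inputs.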

  // Set fixed predefined input arguments.
  cnt = 0;
  for (i = 0; i < TypeFunc::Parms; i++) {
    call->init_req(cnt++, map()->in(i));
  }
  // A little too aggressive on the parm copy; return address is not an input.
  call->set_req(TypeFunc::ReturnAdr, top());
  for (; i < parm_cnt; i++) { // Regular input arguments.
    const Type *f = jdomain->field_at(i);
    if (CCallingConventionRequiresIntsAsLongs && f->isa_int()) {
      call->init_req(cnt++, _gvn.transform(new ConvI2LNode(map()->in(i))));
      call->init_req(cnt++, top());
    } else {
      call->init_req(cnt++, map()->in(i));
    }
  }
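  // Append the thread (TLS base) as the extra argument promised by the C
  // signature built above, followed by the return PC if requested.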
  call->init_req(cnt++, thread);
  if (return_pc) {             // Return PC, if asked for.
    call->init_req(cnt++, returnadr());
  }

  _gvn.transform_no_reclaim(call);

  //-----------------------------
  // Now set up the return results
  set_control( _gvn.transform( new ProjNode(call,TypeFunc::Control)) );
  set_i_o(     _gvn.transform( new ProjNode(call,TypeFunc::I_O    )) );
  set_all_memory_call(call);
  if (range->cnt() > TypeFunc::Parms) {
    Node* retnode = _gvn.transform( new ProjNode(call,TypeFunc::Parms) );
    // C-land is allowed to return sub-word values.  Convert to integer type.
    assert( retval != Type::TOP, "" );
    if (retval == TypeInt::BOOL) {
      retnode = _gvn.transform( new AndINode(retnode, intcon(0xFF)) );
    } else if (retval == TypeInt::CHAR) {
      retnode = _gvn.transform( new AndINode(retnode, intcon(0xFFFF)) );
    } else if (retval == TypeInt::BYTE) {
      retnode = _gvn.transform( new LShiftINode(retnode, intcon(24)) );
      retnode = _gvn.transform( new RShiftINode(retnode, intcon(24)) );
    } else if (retval == TypeInt::SHORT) {
      retnode = _gvn.transform( new LShiftINode(retnode, intcon(16)) );
      retnode = _gvn.transform( new RShiftINode(retnode, intcon(16)) );
    }
    map()->set_req( TypeFunc::Parms, retnode );
  }

  //-----------------------------

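  // Back from the C call: clear the frame-anchor fields so the thread no
  // longer claims to have a walkable last Java frame.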
  // Clear last_Java_sp
  store_to_memory(control(), adr_sp, null(), T_ADDRESS, NoAlias, MemNode::unordered);
  // Clear last_Java_pc
  store_to_memory(control(), adr_last_Java_pc, null(), T_ADDRESS, NoAlias, MemNode::unordered);
#if (defined(IA64) && !defined(AIX))
  Node* adr_last_Java_fp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_fp_offset()));
  store_to_memory(control(), adr_last_Java_fp, null(), T_ADDRESS, NoAlias, MemNode::unordered);
#endif

  // For is-fancy-jump, the C-return value is also the branch target
  Node* target = map()->in(TypeFunc::Parms);
  // Runtime call returning oop in TLS?  Fetch it out
  if( pass_tls ) {
    Node* adr = basic_plus_adr(top(), thread, in_bytes(JavaThread::vm_result_offset()));
    Node* vm_result = make_load(nullptr, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, MemNode::unordered);
    map()->set_req(TypeFunc::Parms, vm_result); // vm_result passed as result
    // Clear the vm_result slot in thread-local storage (TLS)
    store_to_memory(control(), adr, null(), T_ADDRESS, NoAlias, MemNode::unordered);
  }

  //-----------------------------
  // check exception
  Node* adr = basic_plus_adr(top(), thread, in_bytes(Thread::pending_exception_offset()));
  Node* pending = make_load(nullptr, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, MemNode::unordered);

  Node* exit_memory = reset_memory();
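  // reset_memory() hands back the merged memory state for use on the exit
  // paths; the kit's map must not supply memory past this point.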

  Node* cmp = _gvn.transform( new CmpPNode(pending, null()) );
  Node* bo  = _gvn.transform( new BoolNode(cmp, BoolTest::ne) );
  IfNode   *iff = create_and_map_if(control(), bo, PROB_MIN, COUNT_UNKNOWN);

  Node* if_null     = _gvn.transform( new IfFalseNode(iff) );
  Node* if_not_null = _gvn.transform( new IfTrueNode(iff)  );

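  // If an exception is pending, tail-call the shared forward_exception stub,
  // which dispatches the pending exception to the proper handler.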
  assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
  Node *exc_target = makecon(TypeRawPtr::make( StubRoutines::forward_exception_entry() ));
  Node *to_exc = new TailCallNode(if_not_null,
                                  i_o(),
                                  exit_memory,
                                  frameptr(),
                                  returnadr(),
                                  exc_target, null());
  root()->add_req(_gvn.transform(to_exc));  // bind to root to keep live
  C->init_start(start);

  //-----------------------------
  // If this is a normal subroutine return, issue the return and be done.
  Node *ret = nullptr;
  switch( is_fancy_jump ) {
  case 0:                       // Make a return instruction
    // Return to caller, free any space for return address
    ret = new ReturnNode(TypeFunc::Parms, if_null,
                         i_o(),
                         exit_memory,
                         frameptr(),
                         returnadr());
    if (C->tf()->range_sig()->cnt() > TypeFunc::Parms)
      ret->add_req( map()->in(TypeFunc::Parms) );
    break;
  case 1:    // This is a fancy tail-call jump.  Jump to computed address.
    // Jump to new callee; leave old return address alone.
    ret = new TailCallNode(if_null,
                           i_o(),
                           exit_memory,
                           frameptr(),
                           returnadr(),
                           target, map()->in(TypeFunc::Parms));
    break;
  case 2:                       // Pop return address & jump
    // Throw away old return address; jump to new computed address
    //assert(C_function == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C), "fancy_jump==2 only for rethrow");
    ret = new TailJumpNode(if_null,
                           i_o(),
                           exit_memory,
                           frameptr(),
                           target, map()->in(TypeFunc::Parms));
    break;
  default:
    ShouldNotReachHere();
  }
  root()->add_req(_gvn.transform(ret));
}