< prev index next >

src/hotspot/cpu/s390/sharedRuntime_s390.cpp

Print this page

2075   __ flush();
2076   //////////////////////////////////////////////////////////////////////
2077   // end of code generation
2078   //////////////////////////////////////////////////////////////////////
2079 
2080 
2081   nmethod *nm = nmethod::new_native_nmethod(method,
2082                                             compile_id,
2083                                             masm->code(),
2084                                             (int)(wrapper_VEPStart-wrapper_CodeStart),
2085                                             (int)(wrapper_FrameDone-wrapper_CodeStart),
2086                                             stack_slots / VMRegImpl::slots_per_word,
2087                                             (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2088                                             in_ByteSize(lock_offset),
2089                                             oop_maps);
2090 
2091   return nm;
2092 }
2093 
2094 static address gen_c2i_adapter(MacroAssembler  *masm,
2095                                int total_args_passed,
2096                                int comp_args_on_stack,
2097                                const BasicType *sig_bt,
2098                                const VMRegPair *regs,
2099                                Label &skip_fixup) {
2100   // Before we get into the guts of the C2I adapter, see if we should be here
2101   // at all. We've come from compiled code and are attempting to jump to the
2102   // interpreter, which means the caller made a static call to get here
2103   // (vcalls always get a compiled target if there is one). Check for a
2104   // compiled target. If there is one, we need to patch the caller's call.
2105 
2106   // These two defs MUST MATCH code in gen_i2c2i_adapter!
2107   const Register ientry = Z_R11;
2108   const Register code   = Z_R11;
2109 
2110   address c2i_entrypoint;
2111   Label   patch_callsite;
2112 
2113   // Regular (verified) c2i entry point.
2114   c2i_entrypoint = __ pc();
2115 
2116   // Call patching needed?
2117   __ load_and_test_long(Z_R0_scratch, method_(code));
2118   __ z_lg(ientry, method_(interpreter_entry));  // Preload interpreter entry (also if patching).
2119   __ z_brne(patch_callsite);                    // Patch required if code isn't null (compiled target exists).
2120 
2121   __ bind(skip_fixup);  // Return point from patch_callsite.
2122 
2123   // Since all args are passed on the stack, total_args_passed*wordSize is the
2124   // space we need. We need ABI scratch area but we use the caller's since
2125   // it has already been allocated.
2126 
2127   const int abi_scratch = frame::z_top_ijava_frame_abi_size;
2128   int       extraspace  = align_up(total_args_passed, 2)*wordSize + abi_scratch;
2129   Register  sender_SP   = Z_R10;
2130   Register  value       = Z_R12;
2131 
2132   // Remember the senderSP so we can pop the interpreter arguments off of the stack.
2133   // In addition, template interpreter expects initial_caller_sp in Z_R10.
2134   __ z_lgr(sender_SP, Z_SP);
2135 
2136   // This should always fit in 14 bit immediate.
2137   __ resize_frame(-extraspace, Z_R0_scratch);
2138 
2139   // We use the caller's ABI scratch area (out_preserved_stack_slots) for the initial
2140   // args. This essentially moves the callers ABI scratch area from the top to the
2141   // bottom of the arg area.
2142 
2143   int st_off =  extraspace - wordSize;
2144 
2145   // Now write the args into the outgoing interpreter space.
2146   for (int i = 0; i < total_args_passed; i++) {


2147     VMReg r_1 = regs[i].first();
2148     VMReg r_2 = regs[i].second();
2149     if (!r_1->is_valid()) {
2150       assert(!r_2->is_valid(), "");
2151       continue;
2152     }
2153     if (r_1->is_stack()) {
2154       // The calling convention produces OptoRegs that ignore the preserve area (abi scratch).
2155       // We must account for it here.
2156       int ld_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
2157 
2158       if (!r_2->is_valid()) {
2159         __ z_mvc(Address(Z_SP, st_off), Address(sender_SP, ld_off), sizeof(void*));
2160       } else {
2161         // longs are given 2 64-bit slots in the interpreter,
2162         // but the data is passed in only 1 slot.
2163         if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
2164 #ifdef ASSERT
2165           __ clear_mem(Address(Z_SP, st_off), sizeof(void *));
2166 #endif
2167           st_off -= wordSize;
2168         }
2169         __ z_mvc(Address(Z_SP, st_off), Address(sender_SP, ld_off), sizeof(void*));
2170       }
2171     } else {
2172       if (r_1->is_Register()) {
2173         if (!r_2->is_valid()) {
2174           __ z_st(r_1->as_Register(), st_off, Z_SP);
2175         } else {
2176           // longs are given 2 64-bit slots in the interpreter, but the
2177           // data is passed in only 1 slot.
2178           if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
2179 #ifdef ASSERT
2180             __ clear_mem(Address(Z_SP, st_off), sizeof(void *));
2181 #endif
2182             st_off -= wordSize;
2183           }
2184           __ z_stg(r_1->as_Register(), st_off, Z_SP);
2185         }
2186       } else {
2187         assert(r_1->is_FloatRegister(), "");
2188         if (!r_2->is_valid()) {
2189           __ z_ste(r_1->as_FloatRegister(), st_off, Z_SP);
2190         } else {
2191           // In 64bit, doubles are given 2 64-bit slots in the interpreter, but the
2192           // data is passed in only 1 slot.
2193           // One of these should get known junk...
2194 #ifdef ASSERT
2195           __ z_lzdr(Z_F1);
2196           __ z_std(Z_F1, st_off, Z_SP);
2197 #endif
2198           st_off-=wordSize;

2223   __ bind(patch_callsite);
2224 
2225   RegisterSaver::save_live_registers(masm, RegisterSaver::arg_registers);
2226   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), Z_method, Z_R14);
2227   RegisterSaver::restore_live_registers(masm, RegisterSaver::arg_registers);
2228   __ z_bru(skip_fixup);
2229 
2230   // end of out-of-line code
2231 
2232   return c2i_entrypoint;
2233 }
2234 
2235 // On entry, the following registers are set
2236 //
2237 //    Z_thread  r8  - JavaThread*
2238 //    Z_method  r9  - callee's method (method to be invoked)
2239 //    Z_esp     r7  - operand (or expression) stack pointer of caller. one slot above last arg.
2240 //    Z_SP      r15 - SP prepared by call stub such that caller's outgoing args are near top
2241 //
// Generate the interpreter-to-compiled (i2c) adapter: shuffle the
// interpreter's stack-resident arguments (addressed via ld_ptr/Z_esp) into
// the compiled calling convention described by sig_bt/regs, then branch to
// the callee's compiled entry point. Entry-register contract is documented
// in the comment block above.
2242 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
2243                                     int total_args_passed,
2244                                     int comp_args_on_stack,
2245                                     const BasicType *sig_bt,
2246                                     const VMRegPair *regs) {
2247   const Register value = Z_R12;
  // NOTE(review): 'value' is declared but never referenced in this body —
  // presumably kept to match the "move ... through register value=Z_R12"
  // comment below; confirm whether it is dead before removing.
2248   const Register ld_ptr= Z_esp;

2249 
  // Z_esp points one slot above the last argument (see entry comment), so the
  // first argument lives total_args_passed words above ld_ptr. ld_offset is
  // walked downward as arguments are consumed.
2250   int ld_offset = total_args_passed * wordSize;
2251 
2252   // Cut-out for having no stack args.
2253   if (comp_args_on_stack) {
2254     // Sig words on the stack are greater than VMRegImpl::stack0. Those in
2255     // registers are below. By subtracting stack0, we either get a negative
2256     // number (all values in registers) or the maximum stack slot accessed.
2257     // Convert VMRegImpl (4 byte) stack slots to words.
2258     int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
2259     // Round up to minimum stack alignment, in wordSize.
2260     comp_words_on_stack = align_up(comp_words_on_stack, 2);
2261 
2262     __ resize_frame(-comp_words_on_stack*wordSize, Z_R0_scratch);
2263   }
2264 
2265   // Now generate the shuffle code. Pick up all register args and move the
2266   // rest through register value=Z_R12.
2267   for (int i = 0; i < total_args_passed; i++) {
  // T_VOID entries are the unused upper halves of longs/doubles — skip them.
2268     if (sig_bt[i] == T_VOID) {
2269       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");

2270       continue;
2271     }
2272 
2273     // Pick up 0, 1 or 2 words from ld_ptr.
2274     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
2275            "scrambled load targets?");
2276     VMReg r_1 = regs[i].first();
2277     VMReg r_2 = regs[i].second();
2278     if (!r_1->is_valid()) {
2279       assert(!r_2->is_valid(), "");
2280       continue;
2281     }
2282     if (r_1->is_FloatRegister()) {
2283       if (!r_2->is_valid()) {
2284         __ z_le(r_1->as_FloatRegister(), ld_offset, ld_ptr);
2285         ld_offset-=wordSize;
2286       } else {
2287         // Skip the unused interpreter slot.
2288         __ z_ld(r_1->as_FloatRegister(), ld_offset - wordSize, ld_ptr);
2289         ld_offset -= 2 * wordSize;
2290       }
2291     } else {
2292       if (r_1->is_stack()) {
2293         // Must do a memory to memory move.
2294         int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
2295 
2296         if (!r_2->is_valid()) {
2297           __ z_mvc(Address(Z_SP, st_off), Address(ld_ptr, ld_offset), sizeof(void*));
2298         } else {
2299           // In 64bit, longs are given 2 64-bit slots in the interpreter, but the
2300           // data is passed in only 1 slot.
2301           if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
2302             ld_offset -= wordSize;
2303           }
2304           __ z_mvc(Address(Z_SP, st_off), Address(ld_ptr, ld_offset), sizeof(void*));
2305         }
2306       } else {
2307         if (!r_2->is_valid()) {
2308           // Not sure we need to do this but it shouldn't hurt.
          // References/addresses are loaded as full 64-bit words, other
          // one-slot values as 32 bits.
2309           if (is_reference_type(sig_bt[i]) || sig_bt[i] == T_ADDRESS) {
2310             __ z_lg(r_1->as_Register(), ld_offset, ld_ptr);
2311           } else {
2312             __ z_l(r_1->as_Register(), ld_offset, ld_ptr);
2313           }
2314         } else {
2315           // In 64bit, longs are given 2 64-bit slots in the interpreter, but the
2316           // data is passed in only 1 slot.
2317           if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
2318             ld_offset -= wordSize;
2319           }
2320           __ z_lg(r_1->as_Register(), ld_offset, ld_ptr);
2321         }
2322       }
2323       ld_offset -= wordSize;
2324     }
2325   }
2326 
2327   // Jump to the compiled code just as if compiled code was doing it.
2328   // load target address from method:
2329   __ z_lg(Z_R1_scratch, Address(Z_method, Method::from_compiled_offset()));
2330 
2331   // Store method into thread->callee_target.
2332   // 6243940: We might end up in handle_wrong_method if
2333   // the callee is deoptimized as we race thru here. If that
2334   // happens we don't want to take a safepoint because the
2335   // caller frame will look interpreted and arguments are now
2336   // "compiled" so it is much better to make this transition
2337   // invisible to the stack walking code. Unfortunately, if
2338   // we try and find the callee by normal means a safepoint
2339   // is possible. So we stash the desired callee in the thread
2340   // and the vm will find it there should this case occur.
2341   __ z_stg(Z_method, thread_(callee_target));
2342 
  // Unconditional branch to the compiled entry loaded above.
2343   __ z_br(Z_R1_scratch);
2344 }
2345 
2346 void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
2347                                             int total_args_passed,
2348                                             int comp_args_on_stack,
2349                                             const BasicType *sig_bt,
2350                                             const VMRegPair *regs,
2351                                             address entry_address[AdapterBlob::ENTRY_COUNT]) {






2352   __ align(CodeEntryAlignment);
2353   entry_address[AdapterBlob::I2C] = __ pc();
2354   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
2355 
2356   Label skip_fixup;
2357   {
2358     Label ic_miss;
2359 
2360     // Out-of-line call to ic_miss handler.
2361     __ call_ic_miss_handler(ic_miss, 0x11, 0, Z_R1_scratch);
2362 
2363     // Unverified Entry Point UEP
2364     __ align(CodeEntryAlignment);
2365     entry_address[AdapterBlob::C2I_Unverified] = __ pc();
2366 
2367     __ ic_check(2);
2368     __ z_lg(Z_method, Address(Z_inline_cache, CompiledICData::speculated_method_offset()));
2369     // This def MUST MATCH code in gen_c2i_adapter!
2370     const Register code = Z_R11;
2371 
2372     __ load_and_test_long(Z_R0, method_(code));
2373     __ z_brne(ic_miss);  // Cache miss: call runtime to handle this.
2374 

2379 
2380   // Class initialization barrier for static methods
2381   entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
2382   assert(VM_Version::supports_fast_class_init_checks(), "sanity");
2383   Label L_skip_barrier;
2384 
2385   // Bypass the barrier for non-static methods
2386   __ testbit_ushort(Address(Z_method, Method::access_flags_offset()), JVM_ACC_STATIC_BIT);
2387   __ z_bfalse(L_skip_barrier); // non-static
2388 
2389   Register klass = Z_R11;
2390   __ load_method_holder(klass, Z_method);
2391   __ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);
2392 
2393   __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
2394   __ z_br(klass);
2395 
2396   __ bind(L_skip_barrier);
2397   entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
2398 
2399   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
2400   return;
2401 }
2402 
2403 // This function returns the adjust size (in number of words) to a c2i adapter
2404 // activation for use during deoptimization.
2405 //
2406 // Actually only compiled frames need to be adjusted, but it
2407 // doesn't harm to adjust entry and interpreter frames, too.
2408 //
2409 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2410   assert(callee_locals >= callee_parameters,
2411           "test and remove; got more parms than locals");
  // One stack element per non-parameter local, plus the parent ABI area
  // (converted from bytes to words).
2412   // Handle the abi adjustment here instead of doing it in push_skeleton_frames.
2413   return (callee_locals - callee_parameters) * Interpreter::stackElementWords +
2414          frame::z_parent_ijava_frame_abi_size / BytesPerWord;
2415 }
2416 
2417 uint SharedRuntime::in_preserve_stack_slots() {
  // Number of 4-byte stack slots in the JIT "in preserve" area, as defined by
  // the platform frame layout (frame::jit_in_preserve_size_in_4_byte_units).
2418   return frame::jit_in_preserve_size_in_4_byte_units;
2419 }

3390 
// SpinPause: intentionally a no-op on this platform; always returns 0
// (presumably "no pause performed" — confirm against callers of SpinPause).
3391 extern "C"
3392 int SpinPause() {
3393   return 0;
3394 }
3395 
3396 #if INCLUDE_JFR
// JFR checkpoint-write stub generator. Returns nullptr when Loom continuations
// are disabled; otherwise not implemented on s390 and fails via Unimplemented().
3397 RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
3398   if (!Continuations::enabled()) return nullptr;
3399   Unimplemented();
3400   return nullptr;
3401 }
3402 
// JFR return-lease stub generator. Returns nullptr when Loom continuations
// are disabled; otherwise not implemented on s390 and fails via Unimplemented().
3403 RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
3404   if (!Continuations::enabled()) return nullptr;
3405   Unimplemented();
3406   return nullptr;
3407 }
3408 
3409 #endif // INCLUDE_JFR














2075   __ flush();
2076   //////////////////////////////////////////////////////////////////////
2077   // end of code generation
2078   //////////////////////////////////////////////////////////////////////
2079 
2080 
2081   nmethod *nm = nmethod::new_native_nmethod(method,
2082                                             compile_id,
2083                                             masm->code(),
2084                                             (int)(wrapper_VEPStart-wrapper_CodeStart),
2085                                             (int)(wrapper_FrameDone-wrapper_CodeStart),
2086                                             stack_slots / VMRegImpl::slots_per_word,
2087                                             (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2088                                             in_ByteSize(lock_offset),
2089                                             oop_maps);
2090 
2091   return nm;
2092 }
2093 
2094 static address gen_c2i_adapter(MacroAssembler  *masm,

2095                                int comp_args_on_stack,
2096                                const GrowableArray<SigEntry>* sig,
2097                                const VMRegPair *regs,
2098                                Label &skip_fixup) {
2099   // Before we get into the guts of the C2I adapter, see if we should be here
2100   // at all. We've come from compiled code and are attempting to jump to the
2101   // interpreter, which means the caller made a static call to get here
2102   // (vcalls always get a compiled target if there is one). Check for a
2103   // compiled target. If there is one, we need to patch the caller's call.
2104 
2105   // These two defs MUST MATCH code in gen_i2c2i_adapter!
2106   const Register ientry = Z_R11;
2107   const Register code   = Z_R11;
2108 
2109   address c2i_entrypoint;
2110   Label   patch_callsite;
2111 
2112   // Regular (verified) c2i entry point.
2113   c2i_entrypoint = __ pc();
2114 
2115   // Call patching needed?
2116   __ load_and_test_long(Z_R0_scratch, method_(code));
2117   __ z_lg(ientry, method_(interpreter_entry));  // Preload interpreter entry (also if patching).
2118   __ z_brne(patch_callsite);                    // Patch required if code isn't null (compiled target exists).
2119 
2120   __ bind(skip_fixup);  // Return point from patch_callsite.
2121 
2122   // Since all args are passed on the stack, total_args_passed*wordSize is the
2123   // space we need. We need ABI scratch area but we use the caller's since
2124   // it has already been allocated.
2125   int       total_args_passed = sig->length();
2126   const int abi_scratch = frame::z_top_ijava_frame_abi_size;
2127   int       extraspace  = align_up(total_args_passed, 2)*wordSize + abi_scratch;
2128   Register  sender_SP   = Z_R10;
2129   Register  value       = Z_R12;
2130 
2131   // Remember the senderSP so we can pop the interpreter arguments off of the stack.
2132   // In addition, template interpreter expects initial_caller_sp in Z_R10.
2133   __ z_lgr(sender_SP, Z_SP);
2134 
2135   // This should always fit in 14 bit immediate.
2136   __ resize_frame(-extraspace, Z_R0_scratch);
2137 
2138   // We use the caller's ABI scratch area (out_preserved_stack_slots) for the initial
2139   // args. This essentially moves the callers ABI scratch area from the top to the
2140   // bottom of the arg area.
2141 
2142   int st_off =  extraspace - wordSize;
2143 
2144   // Now write the args into the outgoing interpreter space.
2145   for (int i = 0; i < total_args_passed; i++) {
2146     BasicType bt = sig->at(i)._bt;
2147 
2148     VMReg r_1 = regs[i].first();
2149     VMReg r_2 = regs[i].second();
2150     if (!r_1->is_valid()) {
2151       assert(!r_2->is_valid(), "");
2152       continue;
2153     }
2154     if (r_1->is_stack()) {
2155       // The calling convention produces OptoRegs that ignore the preserve area (abi scratch).
2156       // We must account for it here.
2157       int ld_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
2158 
2159       if (!r_2->is_valid()) {
2160         __ z_mvc(Address(Z_SP, st_off), Address(sender_SP, ld_off), sizeof(void*));
2161       } else {
2162         // longs are given 2 64-bit slots in the interpreter,
2163         // but the data is passed in only 1 slot.
2164         if (bt == T_LONG || bt == T_DOUBLE) {
2165 #ifdef ASSERT
2166           __ clear_mem(Address(Z_SP, st_off), sizeof(void *));
2167 #endif
2168           st_off -= wordSize;
2169         }
2170         __ z_mvc(Address(Z_SP, st_off), Address(sender_SP, ld_off), sizeof(void*));
2171       }
2172     } else {
2173       if (r_1->is_Register()) {
2174         if (!r_2->is_valid()) {
2175           __ z_st(r_1->as_Register(), st_off, Z_SP);
2176         } else {
2177           // longs are given 2 64-bit slots in the interpreter, but the
2178           // data is passed in only 1 slot.
2179           if (bt == T_LONG || bt == T_DOUBLE) {
2180 #ifdef ASSERT
2181             __ clear_mem(Address(Z_SP, st_off), sizeof(void *));
2182 #endif
2183             st_off -= wordSize;
2184           }
2185           __ z_stg(r_1->as_Register(), st_off, Z_SP);
2186         }
2187       } else {
2188         assert(r_1->is_FloatRegister(), "");
2189         if (!r_2->is_valid()) {
2190           __ z_ste(r_1->as_FloatRegister(), st_off, Z_SP);
2191         } else {
2192           // In 64bit, doubles are given 2 64-bit slots in the interpreter, but the
2193           // data is passed in only 1 slot.
2194           // One of these should get known junk...
2195 #ifdef ASSERT
2196           __ z_lzdr(Z_F1);
2197           __ z_std(Z_F1, st_off, Z_SP);
2198 #endif
2199           st_off-=wordSize;

2224   __ bind(patch_callsite);
2225 
2226   RegisterSaver::save_live_registers(masm, RegisterSaver::arg_registers);
2227   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), Z_method, Z_R14);
2228   RegisterSaver::restore_live_registers(masm, RegisterSaver::arg_registers);
2229   __ z_bru(skip_fixup);
2230 
2231   // end of out-of-line code
2232 
2233   return c2i_entrypoint;
2234 }
2235 
2236 // On entry, the following registers are set
2237 //
2238 //    Z_thread  r8  - JavaThread*
2239 //    Z_method  r9  - callee's method (method to be invoked)
2240 //    Z_esp     r7  - operand (or expression) stack pointer of caller. one slot above last arg.
2241 //    Z_SP      r15 - SP prepared by call stub such that caller's outgoing args are near top
2242 //
// Generate the interpreter-to-compiled (i2c) adapter: shuffle the
// interpreter's stack-resident arguments (addressed via ld_ptr/Z_esp) into
// the compiled calling convention described by sig/regs, then branch to the
// callee's compiled entry point. Entry-register contract is documented in
// the comment block above.
2243 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,

2244                                     int comp_args_on_stack,
2245                                     const GrowableArray<SigEntry>* sig,
2246                                     const VMRegPair *regs) {
2247   const Register value = Z_R12;
  // NOTE(review): 'value' is declared but never referenced in this body —
  // presumably kept to match the "move ... through register value=Z_R12"
  // comment below; confirm whether it is dead before removing.
2248   const Register ld_ptr= Z_esp;
2249   int total_args_passed = sig->length();
2250 
  // Z_esp points one slot above the last argument (see entry comment), so the
  // first argument lives total_args_passed words above ld_ptr. ld_offset is
  // walked downward as arguments are consumed.
2251   int ld_offset = total_args_passed * wordSize;
2252 
2253   // Cut-out for having no stack args.
2254   if (comp_args_on_stack) {
2255     // Sig words on the stack are greater than VMRegImpl::stack0. Those in
2256     // registers are below. By subtracting stack0, we either get a negative
2257     // number (all values in registers) or the maximum stack slot accessed.
2258     // Convert VMRegImpl (4 byte) stack slots to words.
2259     int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
2260     // Round up to minimum stack alignment, in wordSize.
2261     comp_words_on_stack = align_up(comp_words_on_stack, 2);
2262 
2263     __ resize_frame(-comp_words_on_stack*wordSize, Z_R0_scratch);
2264   }
2265 
2266   // Now generate the shuffle code. Pick up all register args and move the
2267   // rest through register value=Z_R12.
2268   for (int i = 0; i < total_args_passed; i++) {
2269     BasicType bt = sig->at(i)._bt;
  // T_VOID entries are the unused upper halves of longs/doubles — skip them.
2270     if (bt == T_VOID) {
2271       assert(i > 0 && (sig->at(i - 1)._bt == T_LONG || sig->at(i - 1)._bt == T_DOUBLE), "missing half");
2272       continue;
2273     }
2274 
2275     // Pick up 0, 1 or 2 words from ld_ptr.
2276     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
2277            "scrambled load targets?");
2278     VMReg r_1 = regs[i].first();
2279     VMReg r_2 = regs[i].second();
2280     if (!r_1->is_valid()) {
2281       assert(!r_2->is_valid(), "");
2282       continue;
2283     }
2284     if (r_1->is_FloatRegister()) {
2285       if (!r_2->is_valid()) {
2286         __ z_le(r_1->as_FloatRegister(), ld_offset, ld_ptr);
2287         ld_offset-=wordSize;
2288       } else {
2289         // Skip the unused interpreter slot.
2290         __ z_ld(r_1->as_FloatRegister(), ld_offset - wordSize, ld_ptr);
2291         ld_offset -= 2 * wordSize;
2292       }
2293     } else {
2294       if (r_1->is_stack()) {
2295         // Must do a memory to memory move.
2296         int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
2297 
2298         if (!r_2->is_valid()) {
2299           __ z_mvc(Address(Z_SP, st_off), Address(ld_ptr, ld_offset), sizeof(void*));
2300         } else {
2301           // In 64bit, longs are given 2 64-bit slots in the interpreter, but the
2302           // data is passed in only 1 slot.
2303           if (bt == T_LONG || bt == T_DOUBLE) {
2304             ld_offset -= wordSize;
2305           }
2306           __ z_mvc(Address(Z_SP, st_off), Address(ld_ptr, ld_offset), sizeof(void*));
2307         }
2308       } else {
2309         if (!r_2->is_valid()) {
2310           // Not sure we need to do this but it shouldn't hurt.
          // References/addresses are loaded as full 64-bit words, other
          // one-slot values as 32 bits.
2311           if (is_reference_type(bt) || bt == T_ADDRESS) {
2312             __ z_lg(r_1->as_Register(), ld_offset, ld_ptr);
2313           } else {
2314             __ z_l(r_1->as_Register(), ld_offset, ld_ptr);
2315           }
2316         } else {
2317           // In 64bit, longs are given 2 64-bit slots in the interpreter, but the
2318           // data is passed in only 1 slot.
2319           if (bt == T_LONG || bt == T_DOUBLE) {
2320             ld_offset -= wordSize;
2321           }
2322           __ z_lg(r_1->as_Register(), ld_offset, ld_ptr);
2323         }
2324       }
2325       ld_offset -= wordSize;
2326     }
2327   }
2328 
2329   // Jump to the compiled code just as if compiled code was doing it.
2330   // load target address from method:
2331   __ z_lg(Z_R1_scratch, Address(Z_method, Method::from_compiled_offset()));
2332 
2333   // Store method into thread->callee_target.
2334   // 6243940: We might end up in handle_wrong_method if
2335   // the callee is deoptimized as we race thru here. If that
2336   // happens we don't want to take a safepoint because the
2337   // caller frame will look interpreted and arguments are now
2338   // "compiled" so it is much better to make this transition
2339   // invisible to the stack walking code. Unfortunately, if
2340   // we try and find the callee by normal means a safepoint
2341   // is possible. So we stash the desired callee in the thread
2342   // and the vm will find it there should this case occur.
2343   __ z_stg(Z_method, thread_(callee_target));
2344 
  // Unconditional branch to the compiled entry loaded above.
2345   __ z_br(Z_R1_scratch);
2346 }
2347 
2348 void SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,

2349                                             int comp_args_on_stack,
2350                                             const GrowableArray<SigEntry>* sig,
2351                                             const VMRegPair* regs,
2352                                             const GrowableArray<SigEntry>* sig_cc,
2353                                             const VMRegPair* regs_cc,
2354                                             const GrowableArray<SigEntry>* sig_cc_ro,
2355                                             const VMRegPair* regs_cc_ro,
2356                                             address entry_address[AdapterBlob::ENTRY_COUNT],
2357                                             AdapterBlob*& new_adapter,
2358                                             bool allocate_code_blob) {
2359   __ align(CodeEntryAlignment);
2360   entry_address[AdapterBlob::I2C] = __ pc();
2361   gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);
2362 
2363   Label skip_fixup;
2364   {
2365     Label ic_miss;
2366 
2367     // Out-of-line call to ic_miss handler.
2368     __ call_ic_miss_handler(ic_miss, 0x11, 0, Z_R1_scratch);
2369 
2370     // Unverified Entry Point UEP
2371     __ align(CodeEntryAlignment);
2372     entry_address[AdapterBlob::C2I_Unverified] = __ pc();
2373 
2374     __ ic_check(2);
2375     __ z_lg(Z_method, Address(Z_inline_cache, CompiledICData::speculated_method_offset()));
2376     // This def MUST MATCH code in gen_c2i_adapter!
2377     const Register code = Z_R11;
2378 
2379     __ load_and_test_long(Z_R0, method_(code));
2380     __ z_brne(ic_miss);  // Cache miss: call runtime to handle this.
2381 

2386 
2387   // Class initialization barrier for static methods
2388   entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
2389   assert(VM_Version::supports_fast_class_init_checks(), "sanity");
2390   Label L_skip_barrier;
2391 
2392   // Bypass the barrier for non-static methods
2393   __ testbit_ushort(Address(Z_method, Method::access_flags_offset()), JVM_ACC_STATIC_BIT);
2394   __ z_bfalse(L_skip_barrier); // non-static
2395 
2396   Register klass = Z_R11;
2397   __ load_method_holder(klass, Z_method);
2398   __ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);
2399 
2400   __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
2401   __ z_br(klass);
2402 
2403   __ bind(L_skip_barrier);
2404   entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
2405 
2406   gen_c2i_adapter(masm, comp_args_on_stack, sig, regs, skip_fixup);
2407   return;
2408 }
2409 
2410 // This function returns the adjust size (in number of words) to a c2i adapter
2411 // activation for use during deoptimization.
2412 //
2413 // Actually only compiled frames need to be adjusted, but it
2414 // doesn't harm to adjust entry and interpreter frames, too.
2415 //
2416 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2417   assert(callee_locals >= callee_parameters,
2418           "test and remove; got more parms than locals");
  // One stack element per non-parameter local, plus the parent ABI area
  // (converted from bytes to words).
2419   // Handle the abi adjustment here instead of doing it in push_skeleton_frames.
2420   return (callee_locals - callee_parameters) * Interpreter::stackElementWords +
2421          frame::z_parent_ijava_frame_abi_size / BytesPerWord;
2422 }
2423 
2424 uint SharedRuntime::in_preserve_stack_slots() {
  // Number of 4-byte stack slots in the JIT "in preserve" area, as defined by
  // the platform frame layout (frame::jit_in_preserve_size_in_4_byte_units).
2425   return frame::jit_in_preserve_size_in_4_byte_units;
2426 }

3397 
// Spin-wait hint with C linkage. Intentionally a no-op on this platform; the
// constant result 0 reports that no pause/delay was actually performed
// (presumably — confirm the return-value convention with callers).
extern "C" int SpinPause() {
  const int no_pause_performed = 0;
  return no_pause_performed;
}
3402 
3403 #if INCLUDE_JFR
// JFR checkpoint-write stub generator. Returns nullptr when Loom continuations
// are disabled; otherwise not implemented on s390 and fails via Unimplemented().
3404 RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
3405   if (!Continuations::enabled()) return nullptr;
3406   Unimplemented();
3407   return nullptr;
3408 }
3409 
// JFR return-lease stub generator. Returns nullptr when Loom continuations
// are disabled; otherwise not implemented on s390 and fails via Unimplemented().
3410 RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
3411   if (!Continuations::enabled()) return nullptr;
3412   Unimplemented();
3413   return nullptr;
3414 }
3415 
3416 #endif // INCLUDE_JFR
3417 
// Upper bounds on the number of integer / float registers usable for returning
// Java values; mirrors the Java parameter register counts on this platform
// (Argument::n_int/float_register_parameters_j).
3418 const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j;
3419 const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
3420 
// Computes register assignments (regs) for returning total_args_passed values
// of types sig_bt from Java code. Not implemented on s390; fails via
// Unimplemented(), so the 0 return value is placeholder only.
3421 int SharedRuntime::java_return_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) {
3422   Unimplemented();
3423   return 0;
3424 }
3425 
// Adapter generator for buffered inline types (inline klass vk). Not
// implemented on s390; fails via Unimplemented(), so the nullptr return value
// is placeholder only.
3426 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
3427   Unimplemented();
3428   return nullptr;
3429 }
< prev index next >