
src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "asm/macroAssembler.hpp"
  29 #include "asm/macroAssembler.inline.hpp"
  30 #include "code/codeCache.hpp"

  31 #include "code/debugInfoRec.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/vtableStubs.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/barrierSetAssembler.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "interpreter/interp_masm.hpp"
  38 #include "logging/log.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "nativeInst_aarch64.hpp"
  41 #include "oops/compiledICHolder.hpp"
  42 #include "oops/klass.inline.hpp"
  43 #include "prims/methodHandles.hpp"
  44 #include "runtime/jniHandles.hpp"
  45 #include "runtime/safepointMechanism.hpp"
  46 #include "runtime/sharedRuntime.hpp"
  47 #include "runtime/signature.hpp"
  48 #include "runtime/stubRoutines.hpp"
  49 #include "runtime/vframeArray.hpp"
  50 #include "utilities/align.hpp"

 215     } else {
 216       sp_offset = FloatRegisterImpl::save_slots_per_register * i;
 217     }
 218     oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
 219                               r->as_VMReg());
 220   }
 221 
 222   return oop_map;
 223 }
 224 
 225 void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
 226 #ifdef COMPILER2
 227   __ pop_CPU_state(_save_vectors, Matcher::supports_scalable_vector(),
 228                    Matcher::scalable_vector_reg_size(T_BYTE));
 229 #else
 230 #if !INCLUDE_JVMCI
 231   assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
 232 #endif
 233   __ pop_CPU_state(_save_vectors);
 234 #endif
 235   __ leave();
 236 
 237 }
 238 
 239 // Is the vector's size (in bytes) bigger than the size saved by default?
 240 // 8-byte vector registers are saved by default on AArch64.
 241 bool SharedRuntime::is_wide_vector(int size) {
 242   return size > 8;
 243 }
 244 
 245 // The java_calling_convention describes stack locations as ideal slots on
 246 // a frame with no abi restrictions. Since we must observe abi restrictions
 247 // (like the placement of the register window) the slots must be biased by
 248 // the following value.
 249 static int reg2offset_in(VMReg r) {
 250   // Account for saved rfp and lr
 251   // This should really be in_preserve_stack_slots
 252   return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
 253 }
 254 
 255 static int reg2offset_out(VMReg r) {

 665         // interpreter.
 666 
 667         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 668                            next_off : ld_off;
 669 
 670         // this can be a misaligned move
 671         __ ldr(r, Address(esp, offset));
 672       } else {
 673         // sign extend and use a full word?
 674         __ ldrw(r, Address(esp, ld_off));
 675       }
 676     } else {
 677       if (!r_2->is_valid()) {
 678         __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
 679       } else {
 680         __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
 681       }
 682     }
 683   }
 684 
 685   // 6243940 We might end up in handle_wrong_method if
 686   // the callee is deoptimized as we race through here. If that
 687   // happens we don't want to take a safepoint because the
 688   // caller frame will look interpreted and arguments are now
 689   // "compiled", so it is much better to make this transition
 690   // invisible to the stack walking code. Unfortunately, if
 691   // we try to find the callee by normal means a safepoint
 692   // is possible. So we stash the desired callee in the thread
 693   // and the VM will find it there should this case occur.
 694 
 695   __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
 696 
 697   __ br(rscratch1);
 698 }
 699 
 700 // ---------------------------------------------------------------
 701 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 702                                                             int total_args_passed,
 703                                                             int comp_args_on_stack,
 704                                                             const BasicType *sig_bt,

1190                             const BasicType* sig_bt,
1191                             const VMRegPair* regs) {
1192   Register temp_reg = r19;  // not part of any compiled calling seq
1193   if (VerifyOops) {
1194     for (int i = 0; i < method->size_of_parameters(); i++) {
1195       if (sig_bt[i] == T_OBJECT ||
1196           sig_bt[i] == T_ARRAY) {
1197         VMReg r = regs[i].first();
1198         assert(r->is_valid(), "bad oop arg");
1199         if (r->is_stack()) {
1200           __ ldr(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1201           __ verify_oop(temp_reg);
1202         } else {
1203           __ verify_oop(r->as_Register());
1204         }
1205       }
1206     }
1207   }
1208 }
1209 
1210 static void gen_special_dispatch(MacroAssembler* masm,
1211                                  const methodHandle& method,
1212                                  const BasicType* sig_bt,
1213                                  const VMRegPair* regs) {
1214   verify_oop_args(masm, method, sig_bt, regs);
1215   vmIntrinsics::ID iid = method->intrinsic_id();
1216 
1217   // Now write the args into the outgoing interpreter space
1218   bool     has_receiver   = false;
1219   Register receiver_reg   = noreg;
1220   int      member_arg_pos = -1;
1221   Register member_reg     = noreg;
1222   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1223   if (ref_kind != 0) {
1224     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1225     member_reg = r19;  // known to be free at this point
1226     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1227   } else if (iid == vmIntrinsics::_invokeBasic || iid == vmIntrinsics::_linkToNative) {
1228     has_receiver = true;
1229   } else {

1272 // convention (handlizes oops, etc), transitions to native, makes the call,
1273 // returns to java state (possibly blocking), unhandlizes any result and
1274 // returns.
1275 //
1276 // Critical native functions are a shorthand for the use of
1277 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1278 // functions.  The wrapper is expected to unpack the arguments before
1279 // passing them to the callee. Critical native functions leave the state _in_Java,
1280 // since they block out GC.
1281 // Some other parts of JNI setup are skipped, like the tear-down of the JNI handle
1282 // block and the check for pending exceptions, since it's impossible for them
1283 // to be thrown.
1284 //
1285 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1286                                                 const methodHandle& method,
1287                                                 int compile_id,
1288                                                 BasicType* in_sig_bt,
1289                                                 VMRegPair* in_regs,
1290                                                 BasicType ret_type,
1291                                                 address critical_entry) {
1292   if (method->is_method_handle_intrinsic()) {
1293     vmIntrinsics::ID iid = method->intrinsic_id();
1294     intptr_t start = (intptr_t)__ pc();
1295     int vep_offset = ((intptr_t)__ pc()) - start;
1296 
1297     // First instruction must be a nop as it may need to be patched on deoptimisation
1298     __ nop();
1299     gen_special_dispatch(masm,
1300                          method,
1301                          in_sig_bt,
1302                          in_regs);
1303     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1304     __ flush();
1305     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1306     return nmethod::new_native_nmethod(method,
1307                                        compile_id,
1308                                        masm->code(),
1309                                        vep_offset,
1310                                        frame_complete,
1311                                        stack_slots / VMRegImpl::slots_per_word,

  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "asm/macroAssembler.hpp"
  29 #include "asm/macroAssembler.inline.hpp"
  30 #include "code/codeCache.hpp"
  31 #include "code/compiledIC.hpp"
  32 #include "code/debugInfoRec.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/oopMap.hpp"
  36 #include "gc/shared/barrierSetAssembler.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "interpreter/interp_masm.hpp"
  39 #include "logging/log.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "nativeInst_aarch64.hpp"
  42 #include "oops/compiledICHolder.hpp"
  43 #include "oops/klass.inline.hpp"
  44 #include "prims/methodHandles.hpp"
  45 #include "runtime/jniHandles.hpp"
  46 #include "runtime/safepointMechanism.hpp"
  47 #include "runtime/sharedRuntime.hpp"
  48 #include "runtime/signature.hpp"
  49 #include "runtime/stubRoutines.hpp"
  50 #include "runtime/vframeArray.hpp"
  51 #include "utilities/align.hpp"

 216     } else {
 217       sp_offset = FloatRegisterImpl::save_slots_per_register * i;
 218     }
 219     oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
 220                               r->as_VMReg());
 221   }
 222 
 223   return oop_map;
 224 }
 225 
 226 void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
 227 #ifdef COMPILER2
 228   __ pop_CPU_state(_save_vectors, Matcher::supports_scalable_vector(),
 229                    Matcher::scalable_vector_reg_size(T_BYTE));
 230 #else
 231 #if !INCLUDE_JVMCI
 232   assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
 233 #endif
 234   __ pop_CPU_state(_save_vectors);
 235 #endif
 236   __ ldp(rfp, lr, Address(__ post(sp, 2 * wordSize)));
 237 
 238 }
 239 
 240 // Is the vector's size (in bytes) bigger than the size saved by default?
 241 // 8-byte vector registers are saved by default on AArch64.
 242 bool SharedRuntime::is_wide_vector(int size) {
 243   return size > 8;
 244 }
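A few illustrative values for this predicate, as a reading aid rather than part of the change; the register names in the messages are the usual AArch64 widths and are my assumption:

  // Illustrative only: by default only the low 8 bytes (the D-register view)
  // of each vector register are saved, so anything larger is "wide" and has
  // to go through the full vector save/restore path.
  assert(!SharedRuntime::is_wide_vector(8),  "D register fits the default save");
  assert( SharedRuntime::is_wide_vector(16), "Q register needs the wide path");
  assert( SharedRuntime::is_wide_vector(32), "e.g. a 256-bit SVE vector");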
 245 
 246 // The java_calling_convention describes stack locations as ideal slots on
 247 // a frame with no abi restrictions. Since we must observe abi restrictions
 248 // (like the placement of the register window) the slots must be biased by
 249 // the following value.
 250 static int reg2offset_in(VMReg r) {
 251   // Account for saved rfp and lr
 252   // This should really be in_preserve_stack_slots
 253   return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
 254 }
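A minimal standalone sketch of the biasing performed above, assuming the usual 4-byte VMReg stack slots; the helper name below is made up for illustration and is not part of this file:

  // Illustrative only: mirrors reg2offset_in(). The two machine words saved
  // by the prologue (rfp and lr) occupy 4 VMReg slots of 4 bytes each, so
  // incoming stack slot 0 lives 16 bytes above the incoming SP.
  static int in_offset_bytes(int reg2stack_index) {
    const int slot_size = 4;                    // VMRegImpl::stack_slot_size
    return (reg2stack_index + 4) * slot_size;   // skip saved rfp + lr
  }
  // in_offset_bytes(0) == 16, in_offset_bytes(2) == 24, and so on.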
 255 
 256 static int reg2offset_out(VMReg r) {

 666         // interpreter.
 667 
 668         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 669                            next_off : ld_off;
 670 
 671         // this can be a misaligned move
 672         __ ldr(r, Address(esp, offset));
 673       } else {
 674         // sign extend and use a full word?
 675         __ ldrw(r, Address(esp, ld_off));
 676       }
 677     } else {
 678       if (!r_2->is_valid()) {
 679         __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
 680       } else {
 681         __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
 682       }
 683     }
 684   }
 685 
 686   __ mov(rscratch2, rscratch1);
 687   __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
 688   __ mov(rscratch1, rscratch2);
 689 
 690   // 6243940 We might end up in handle_wrong_method if
 691   // the callee is deoptimized as we race through here. If that
 692   // happens we don't want to take a safepoint because the
 693   // caller frame will look interpreted and arguments are now
 694   // "compiled", so it is much better to make this transition
 695   // invisible to the stack walking code. Unfortunately, if
 696   // we try to find the callee by normal means a safepoint
 697   // is possible. So we stash the desired callee in the thread
 698   // and the VM will find it there should this case occur.
 699 
 700   __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
 701 
 702   __ br(rscratch1);
 703 }
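For context, a hedged sketch of how the runtime side might consume this stash when the race actually happens; it assumes the usual JavaThread accessors for the field behind callee_target_offset() and is simplified, not code from this patch:

  // Hypothetical sketch of the slow path (e.g. handle_wrong_method): prefer
  // the stashed Method* over walking a frame that currently looks interpreted
  // but already carries compiled-style arguments.
  Method* callee = current->callee_target();   // written via callee_target_offset() above
  current->set_callee_target(NULL);            // consume the stash exactly once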
 704 
 705 // ---------------------------------------------------------------
 706 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 707                                                             int total_args_passed,
 708                                                             int comp_args_on_stack,
 709                                                             const BasicType *sig_bt,

1195                             const BasicType* sig_bt,
1196                             const VMRegPair* regs) {
1197   Register temp_reg = r19;  // not part of any compiled calling seq
1198   if (VerifyOops) {
1199     for (int i = 0; i < method->size_of_parameters(); i++) {
1200       if (sig_bt[i] == T_OBJECT ||
1201           sig_bt[i] == T_ARRAY) {
1202         VMReg r = regs[i].first();
1203         assert(r->is_valid(), "bad oop arg");
1204         if (r->is_stack()) {
1205           __ ldr(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
1206           __ verify_oop(temp_reg);
1207         } else {
1208           __ verify_oop(r->as_Register());
1209         }
1210       }
1211     }
1212   }
1213 }
1214 
1215 // defined in stubGenerator_aarch64.cpp
1216 OopMap* continuation_enter_setup(MacroAssembler* masm, int& stack_slots);
1217 void fill_continuation_entry(MacroAssembler* masm);
1218 void continuation_enter_cleanup(MacroAssembler* masm);
1219 
1220 // enterSpecial(Continuation c, boolean isContinue)
1221 // On entry: c_rarg1 -- the continuation object
1222 //           c_rarg2 -- isContinue
1223 static void gen_continuation_enter(MacroAssembler* masm,
1224                                    const methodHandle& method,
1225                                    const BasicType* sig_bt,
1226                                    const VMRegPair* regs,
1227                                    int& exception_offset,
1228                                    OopMapSet* oop_maps,
1229                                    int& frame_complete,
1230                                    int& stack_slots) {
1231   //verify_oop_args(masm, method, sig_bt, regs);
1232   Address resolve(SharedRuntime::get_resolve_static_call_stub(), 
1233                   relocInfo::static_call_type);
1234 
1235   stack_slots = 2; // will be overwritten
1236   address start = __ pc();
1237 
1238   Label call_thaw, exit;
1239 
1240   __ enter(); // save rfp, lr and set up a new frame
1241 
1242   //BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1243   //bs->nmethod_entry_barrier(masm);
1244   OopMap* map = continuation_enter_setup(masm, stack_slots);
1245 
1246   // Frame is now completed as far as size and linkage.
1247   frame_complete = __ pc() - start;
1248 
1249   fill_continuation_entry(masm);
1250 
1251   __ cmp(c_rarg2, (u1)0);
1252   __ br(Assembler::NE, call_thaw);
1253   
1254   address mark = __ pc();
1255 //  __ relocate(resolve.rspec());
1256   //if (!far_branches()) {
1257 //  __ bl(resolve.target()); 
1258   __ trampoline_call1(resolve, NULL, false);
1259 
1260   oop_maps->add_gc_map(__ pc() - start, map);
1261   __ post_call_nop();
1262 
1263   __ b(exit);
1264 
1265   __ bind(call_thaw);
1266 
1267   rt_call(masm, CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
1268   oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1269   ContinuationEntry::return_pc_offset = __ pc() - start;
1270   __ post_call_nop();
1271 
1272   __ bind(exit);
1273   continuation_enter_cleanup(masm);
1274   __ leave();
1275   __ ret(lr);
1276 
1277   /// exception handling
1278 
1279   exception_offset = __ pc() - start;
1280   {
1281       __ mov(r19, r0); // save return value containing the exception oop in callee-saved R19
1282   
1283       continuation_enter_cleanup(masm);
1284       // __ mov(sp, rfp);
1285   
1286       __ ldr(c_rarg1, Address(rfp, wordSize)); // return address
1287       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, c_rarg1);
1288 
1289       // see OptoRuntime::generate_exception_blob: r0 -- exception oop, r3 -- exception pc
1290 
1291       __ mov(r1, r0); // the exception handler
1292       __ mov(r0, r19); // restore return value containing the exception oop
1293       __ verify_oop(r0);
1294 
1295       __ leave();
1296       __ mov(r3, lr);
1297       __ br(r1); // the exception handler
1298   }
1299 
1300   CodeBuffer* cbuf = masm->code_section()->outer();
1301   address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, mark);
1302 }
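To summarize the branch on isContinue above, a small illustrative sketch of the decision the generated entry makes; the enum and helper are invented for illustration and are not part of the patch:

  // Illustrative only: c_rarg2 carries isContinue; zero means "fresh continuation".
  enum class EnterPath { ResolveAndCall,   // first entry: resolved static call to the target
                         Thaw };           // re-entry: remount via StubRoutines::cont_thaw()
  static EnterPath enter_path(bool is_continue) {
    return is_continue ? EnterPath::Thaw : EnterPath::ResolveAndCall;
  }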
1303 
1304 static void gen_special_dispatch(MacroAssembler* masm,
1305                                  const methodHandle& method,
1306                                  const BasicType* sig_bt,
1307                                  const VMRegPair* regs) {
1308   verify_oop_args(masm, method, sig_bt, regs);
1309   vmIntrinsics::ID iid = method->intrinsic_id();
1310 
1311   // Now write the args into the outgoing interpreter space
1312   bool     has_receiver   = false;
1313   Register receiver_reg   = noreg;
1314   int      member_arg_pos = -1;
1315   Register member_reg     = noreg;
1316   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1317   if (ref_kind != 0) {
1318     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1319     member_reg = r19;  // known to be free at this point
1320     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1321   } else if (iid == vmIntrinsics::_invokeBasic || iid == vmIntrinsics::_linkToNative) {
1322     has_receiver = true;
1323   } else {

1366 // convention (handlizes oops, etc), transitions to native, makes the call,
1367 // returns to java state (possibly blocking), unhandlizes any result and
1368 // returns.
1369 //
1370 // Critical native functions are a shorthand for the use of
1371 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1372 // functions.  The wrapper is expected to unpack the arguments before
1373 // passing them to the callee. Critical native functions leave the state _in_Java,
1374 // since they block out GC.
1375 // Some other parts of JNI setup are skipped, like the tear-down of the JNI handle
1376 // block and the check for pending exceptions, since it's impossible for them
1377 // to be thrown.
1378 //
1379 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1380                                                 const methodHandle& method,
1381                                                 int compile_id,
1382                                                 BasicType* in_sig_bt,
1383                                                 VMRegPair* in_regs,
1384                                                 BasicType ret_type,
1385                                                 address critical_entry) {
1386   if (method->is_continuation_enter_intrinsic()) {
1387     vmIntrinsics::ID iid = method->intrinsic_id();
1388     intptr_t start = (intptr_t)__ pc();
1389     int vep_offset = ((intptr_t)__ pc()) - start;
1390     int exception_offset = 0;
1391     int frame_complete = 0;
1392     int stack_slots = 0;
1393     OopMapSet* oop_maps =  new OopMapSet();
1394     gen_continuation_enter(masm,
1395                          method,
1396                          in_sig_bt,
1397                          in_regs,
1398                          exception_offset,
1399                          oop_maps,
1400                          frame_complete,
1401                          stack_slots);
1402     __ flush();
1403     nmethod* nm = nmethod::new_native_nmethod(method,
1404                                               compile_id,
1405                                               masm->code(),
1406                                               vep_offset,
1407                                               frame_complete,
1408                                               stack_slots,
1409                                               in_ByteSize(-1),
1410                                               in_ByteSize(-1),
1411                                               oop_maps,
1412                                               exception_offset);
1413     ContinuationEntry::set_enter_nmethod(nm);
1414     return nm;
1415   }
1416 
1417   if (method->is_method_handle_intrinsic()) {
1418     vmIntrinsics::ID iid = method->intrinsic_id();
1419     intptr_t start = (intptr_t)__ pc();
1420     int vep_offset = ((intptr_t)__ pc()) - start;
1421 
1422     // First instruction must be a nop as it may need to be patched on deoptimisation
1423     __ nop();
1424     gen_special_dispatch(masm,
1425                          method,
1426                          in_sig_bt,
1427                          in_regs);
1428     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1429     __ flush();
1430     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1431     return nmethod::new_native_nmethod(method,
1432                                        compile_id,
1433                                        masm->code(),
1434                                        vep_offset,
1435                                        frame_complete,
1436                                        stack_slots / VMRegImpl::slots_per_word,