src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

   1 /*
   2  * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
   4  * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "asm/macroAssembler.hpp"
  28 #include "asm/macroAssembler.inline.hpp"

  29 #include "code/aotCodeCache.hpp"
  30 #include "code/codeCache.hpp"
  31 #include "code/compiledIC.hpp"
  32 #include "code/debugInfoRec.hpp"
  33 #include "code/vtableStubs.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/barrierSetAssembler.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "interpreter/interp_masm.hpp"
  38 #include "logging/log.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "nativeInst_aarch64.hpp"
  41 #include "oops/klass.inline.hpp"
  42 #include "oops/method.inline.hpp"
  43 #include "prims/methodHandles.hpp"
  44 #include "runtime/continuation.hpp"
  45 #include "runtime/continuationEntry.inline.hpp"
  46 #include "runtime/globals.hpp"
  47 #include "runtime/jniHandles.hpp"
  48 #include "runtime/safepointMechanism.hpp"

 190 
 191   int frame_size_in_bytes = align_up(additional_frame_words * wordSize +
 192                                      reg_save_size * BytesPerInt, 16);
 193   // OopMap frame size is in compiler stack slots (jint's) not bytes or words
 194   int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
 195   // The caller will allocate additional_frame_words
 196   int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
 197   // CodeBlob frame size is in words.
 198   int frame_size_in_words = frame_size_in_bytes / wordSize;
 199   *total_frame_words = frame_size_in_words;
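        // For example (on AArch64, BytesPerInt == 4 and wordSize == 8): a
        // 16-byte-aligned frame of N bytes corresponds to N / 4 OopMap slots
        // and N / 8 CodeBlob words.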
 200 
 201   // Save Integer and Float registers.
 202   __ enter();
 203   __ push_CPU_state(_save_vectors, use_sve, sve_vector_size_in_bytes, total_predicate_in_bytes);
 204 
 205   // Set an oopmap for the call site.  This oopmap will map all
 206   // oop-registers and debug-info registers as callee-saved.  This
 207   // will allow deoptimization at this safepoint to find all possible
 208   // debug-info recordings, as well as let GC find all oops.
 209 
 210   OopMapSet *oop_maps = new OopMapSet();
 211   OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
 212 
 213   for (int i = 0; i < Register::number_of_registers; i++) {
 214     Register r = as_Register(i);
 215     if (i <= rfp->encoding() && r != rscratch1 && r != rscratch2) {
 216       // SP offsets are in 4-byte words.
 217       // Register slots are 8 bytes wide, 32 floating-point registers.
 218       int sp_offset = Register::max_slots_per_register * i +
 219                       FloatRegister::save_slots_per_register * FloatRegister::number_of_registers;
 220       oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots), r->as_VMReg());
 221     }
 222   }
 223 
 224   for (int i = 0; i < FloatRegister::number_of_registers; i++) {
 225     FloatRegister r = as_FloatRegister(i);
 226     int sp_offset = 0;
 227     if (_save_vectors) {
 228       sp_offset = use_sve ? (total_predicate_in_slots + sve_vector_size_in_slots * i) :
 229                             (FloatRegister::slots_per_neon_register * i);
 230     } else {

 342       break;
 343     case T_DOUBLE:
 344       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 345       if (fp_args < Argument::n_float_register_parameters_j) {
 346         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 347       } else {
 348         stk_args = align_up(stk_args, 2);
 349         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 350         stk_args += 2;
 351       }
 352       break;
 353     default:
 354       ShouldNotReachHere();
 355       break;
 356     }
 357   }
 358 
 359   return stk_args;
 360 }
 361 
 362 // Patch the callers callsite with entry to compiled code if it exists.
 363 static void patch_callers_callsite(MacroAssembler *masm) {
 364   Label L;
 365   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 366   __ cbz(rscratch1, L);
 367 
 368   __ enter();
 369   __ push_CPU_state();
 370 
 371   // VM needs caller's callsite
 372   // VM needs target method
 373   // This needs to be a long call since we will relocate this adapter to
 374   // the codeBuffer and it may not reach
 375 
 376 #ifndef PRODUCT
 377   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
 378 #endif
 379 
 380   __ mov(c_rarg0, rmethod);
 381   __ mov(c_rarg1, lr);
 382   __ authenticate_return_address(c_rarg1);
 383   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 384   __ blr(rscratch1);
 385 
 386   // Explicit isb required because fixup_callers_callsite may change the code
 387   // stream.
 388   __ safepoint_isb();
 389 
 390   __ pop_CPU_state();
 391   // restore sp
 392   __ leave();
 393   __ bind(L);
 394 }
 395 
 396 static void gen_c2i_adapter(MacroAssembler *masm,
 397                             int total_args_passed,
 398                             int comp_args_on_stack,
 399                             const BasicType *sig_bt,
 400                             const VMRegPair *regs,
 401                             Label& skip_fixup) {
 402   // Before we get into the guts of the C2I adapter, see if we should be here
 403   // at all.  We've come from compiled code and are attempting to jump to the
 404   // interpreter, which means the caller made a static call to get here
 405   // (vcalls always get a compiled target if there is one).  Check for a
 406   // compiled target.  If there is one, we need to patch the caller's call.
 407   patch_callers_callsite(masm);
 408 
 409   __ bind(skip_fixup);
 410 
 411   int words_pushed = 0;
 412 
 413   // Since all args are passed on the stack, total_args_passed *
 414   // Interpreter::stackElementSize is the space we need.
 415 
 416   int extraspace = total_args_passed * Interpreter::stackElementSize;

 417 
 418   __ mov(r19_sender_sp, sp);

 419 
 420   // stack is aligned, keep it that way
 421   extraspace = align_up(extraspace, 2*wordSize);
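      // For instance, a (long, int) signature arrives here as T_LONG, T_VOID,
      // T_INT, so total_args_passed == 3 and extraspace == 3 * 8 == 24 bytes
      // (stackElementSize is one word here), rounded up to 32 so that sp stays
      // 16-byte aligned.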

 422 
 423   if (extraspace)
 424     __ sub(sp, sp, extraspace);

 425 
 426   // Now write the args into the outgoing interpreter space
 427   for (int i = 0; i < total_args_passed; i++) {
 428     if (sig_bt[i] == T_VOID) {
 429       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 430       continue;
 431     }
 432 
 433     // offset to start parameters
 434     int st_off   = (total_args_passed - i - 1) * Interpreter::stackElementSize;
 435     int next_off = st_off - Interpreter::stackElementSize;
 436 
 437     // Say 4 args:
 438     // i   st_off
 439     // 0   32 T_LONG
 440     // 1   24 T_VOID
 441     // 2   16 T_OBJECT
 442     // 3    8 T_BOOL
 443     // -    0 return address
 444     //
  445     // However, to make things extra confusing: because we can fit a Java long/double in
  446     // a single slot on a 64-bit VM, and it would be silly to break them up, the interpreter
  447     // leaves one slot empty and only stores to a single slot. In this case the
  448     // slot that is occupied is the T_VOID slot. See, I said it was confusing.
 449 
 450     VMReg r_1 = regs[i].first();
 451     VMReg r_2 = regs[i].second();
 452     if (!r_1->is_valid()) {
 453       assert(!r_2->is_valid(), "");
 454       continue;
 455     }
 456     if (r_1->is_stack()) {
 457       // memory to memory use rscratch1
 458       int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
 459                     + extraspace
 460                     + words_pushed * wordSize);
 461       if (!r_2->is_valid()) {
 462         // sign extend??
 463         __ ldrw(rscratch1, Address(sp, ld_off));
 464         __ str(rscratch1, Address(sp, st_off));
 465 
 466       } else {
 467 
 468         __ ldr(rscratch1, Address(sp, ld_off));
 469 
  470         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 471         // T_DOUBLE and T_LONG use two slots in the interpreter
 472         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 473           // ld_off == LSW, ld_off+wordSize == MSW
 474           // st_off == MSW, next_off == LSW
 475           __ str(rscratch1, Address(sp, next_off));
 476 #ifdef ASSERT
 477           // Overwrite the unused slot with known junk
 478           __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaaaull);
 479           __ str(rscratch1, Address(sp, st_off));
 480 #endif /* ASSERT */
 481         } else {
 482           __ str(rscratch1, Address(sp, st_off));
 483         }
 484       }
 485     } else if (r_1->is_Register()) {
 486       Register r = r_1->as_Register();
 487       if (!r_2->is_valid()) {
  488         // must be only an int (or less) so move only 32 bits to the slot
 489         // why not sign extend??
 490         __ str(r, Address(sp, st_off));
 491       } else {
  492         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 493         // T_DOUBLE and T_LONG use two slots in the interpreter
 494         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 495           // jlong/double in gpr
 496 #ifdef ASSERT
 497           // Overwrite the unused slot with known junk
 498           __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaabull);
 499           __ str(rscratch1, Address(sp, st_off));
 500 #endif /* ASSERT */
 501           __ str(r, Address(sp, next_off));
 502         } else {
 503           __ str(r, Address(sp, st_off));
 504         }
 505       }
 506     } else {
 507       assert(r_1->is_FloatRegister(), "");
 508       if (!r_2->is_valid()) {
 509         // only a float use just part of the slot
 510         __ strs(r_1->as_FloatRegister(), Address(sp, st_off));
 511       } else {
 512 #ifdef ASSERT
 513         // Overwrite the unused slot with known junk
 514         __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaacull);
 515         __ str(rscratch1, Address(sp, st_off));
 516 #endif /* ASSERT */
 517         __ strd(r_1->as_FloatRegister(), Address(sp, next_off));
 518       }
 519     }
 520   }
 521 
 522   __ mov(esp, sp); // Interp expects args on caller's expression stack
 523 
 524   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
 525   __ br(rscratch1);
 526 }
 527 

 528 
 529 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
 530                                     int total_args_passed,
 531                                     int comp_args_on_stack,
 532                                     const BasicType *sig_bt,
 533                                     const VMRegPair *regs) {
 534 
 535   // Note: r19_sender_sp contains the senderSP on entry. We must
 536   // preserve it since we may do a i2c -> c2i transition if we lose a
 537   // race where compiled code goes non-entrant while we get args
 538   // ready.
 539 
 540   // Adapters are frameless.
 541 
 542   // An i2c adapter is frameless because the *caller* frame, which is
 543   // interpreted, routinely repairs its own esp (from
 544   // interpreter_frame_last_sp), even if a callee has modified the
 545   // stack pointer.  It also recalculates and aligns sp.
 546 
 547   // A c2i adapter is frameless because the *callee* frame, which is
 548   // interpreted, routinely repairs its caller's sp (from sender_sp,
 549   // which is set up via the senderSP register).
 550 
 551   // In other words, if *either* the caller or callee is interpreted, we can
 552   // get the stack pointer repaired after a call.
 553 
 554   // This is why c2i and i2c adapters cannot be indefinitely composed.
 555   // In particular, if a c2i adapter were to somehow call an i2c adapter,
 556   // both caller and callee would be compiled methods, and neither would
 557   // clean up the stack pointer changes performed by the two adapters.
 558   // If this happens, control eventually transfers back to the compiled
 559   // caller, but with an uncorrected stack, causing delayed havoc.
 560 
 561   // Cut-out for having no stack args.
 562   int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
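      // For example, three stack args occupy 3 * VMRegImpl::stack_slot_size == 12
      // bytes, which rounds up to 2 words (16 bytes); sp is dropped by that amount
      // and re-aligned to 16 just below.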
 563   if (comp_args_on_stack) {
 564     __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
 565     __ andr(sp, rscratch1, -16);

 566   }
 567 
 568   // Will jump to the compiled code just as if compiled code was doing it.
 569   // Pre-load the register-jump target early, to schedule it better.
 570   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));
 571 
 572 #if INCLUDE_JVMCI
 573   if (EnableJVMCI) {
 574     // check if this call should be routed towards a specific entry point
 575     __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 576     Label no_alternative_target;
 577     __ cbz(rscratch2, no_alternative_target);
 578     __ mov(rscratch1, rscratch2);
 579     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 580     __ bind(no_alternative_target);
 581   }
 582 #endif // INCLUDE_JVMCI
 583 


 584   // Now generate the shuffle code.
 585   for (int i = 0; i < total_args_passed; i++) {
 586     if (sig_bt[i] == T_VOID) {
 587       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");

 588       continue;
 589     }
 590 
 591     // Pick up 0, 1 or 2 words from SP+offset.

 592 
 593     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
 594             "scrambled load targets?");
 595     // Load in argument order going down.
 596     int ld_off = (total_args_passed - i - 1)*Interpreter::stackElementSize;
 597     // Point to interpreter value (vs. tag)
 598     int next_off = ld_off - Interpreter::stackElementSize;
 599     //
 600     //
 601     //
 602     VMReg r_1 = regs[i].first();
 603     VMReg r_2 = regs[i].second();
 604     if (!r_1->is_valid()) {
 605       assert(!r_2->is_valid(), "");
 606       continue;
 607     }
 608     if (r_1->is_stack()) {
 609       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 610       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
 611       if (!r_2->is_valid()) {
 612         // sign extend???
 613         __ ldrsw(rscratch2, Address(esp, ld_off));
 614         __ str(rscratch2, Address(sp, st_off));
 615       } else {
 616         //
 617         // We are using two optoregs. This can be either T_OBJECT,
  618         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
  619         // two slots but only uses one for the T_LONG or T_DOUBLE case,
  620         // so we must adjust where to pick up the data to match the
 621         // interpreter.
 622         //
 623         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
 624         // are accessed as negative so LSW is at LOW address
 625 
 626         // ld_off is MSW so get LSW
 627         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 628                            next_off : ld_off;
 629         __ ldr(rscratch2, Address(esp, offset));
 630         // st_off is LSW (i.e. reg.first())
 631         __ str(rscratch2, Address(sp, st_off));
 632       }
 633     } else if (r_1->is_Register()) {  // Register argument
 634       Register r = r_1->as_Register();
 635       if (r_2->is_valid()) {
 636         //
 637         // We are using two VMRegs. This can be either T_OBJECT,
  638         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
  639         // two slots but only uses one for the T_LONG or T_DOUBLE case,
  640         // so we must adjust where to pick up the data to match the
 641         // interpreter.
 642 
 643         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 644                            next_off : ld_off;
 645 
 646         // this can be a misaligned move
 647         __ ldr(r, Address(esp, offset));
 648       } else {
 649         // sign extend and use a full word?
 650         __ ldrw(r, Address(esp, ld_off));
 651       }
 652     } else {
 653       if (!r_2->is_valid()) {
 654         __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
 655       } else {
 656         __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
 657       }
 658     }
 659   }
 660 
 661   __ mov(rscratch2, rscratch1);
 662   __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
 663   __ mov(rscratch1, rscratch2);
 664 
 665   // 6243940 We might end up in handle_wrong_method if
 666   // the callee is deoptimized as we race thru here. If that
 667   // happens we don't want to take a safepoint because the
 668   // caller frame will look interpreted and arguments are now
 669   // "compiled" so it is much better to make this transition
 670   // invisible to the stack walking code. Unfortunately if
 671   // we try and find the callee by normal means a safepoint
 672   // is possible. So we stash the desired callee in the thread
  673   // and the VM will find it there should this case occur.
 674 
 675   __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
 676 
 677   __ br(rscratch1);
 678 }
 679 
 680 // ---------------------------------------------------------------
 681 void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 682                                             int total_args_passed,
 683                                             int comp_args_on_stack,
 684                                             const BasicType *sig_bt,
 685                                             const VMRegPair *regs,
 686                                             address entry_address[AdapterBlob::ENTRY_COUNT]) {
 687   entry_address[AdapterBlob::I2C] = __ pc();
 688 
 689   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
 690 
 691   entry_address[AdapterBlob::C2I_Unverified] = __ pc();
 692   Label skip_fixup;
 693 
 694   Register data = rscratch2;
 695   Register receiver = j_rarg0;
 696   Register tmp = r10;  // A call-clobbered register not used for arg passing
 697 
 698   // -------------------------------------------------------------------------
 699   // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
 700   // to the interpreter.  The args start out packed in the compiled layout.  They
 701   // need to be unpacked into the interpreter layout.  This will almost always
 702   // require some stack space.  We grow the current (compiled) stack, then repack
 703   // the args.  We  finally end in a jump to the generic interpreter entry point.
 704   // On exit from the interpreter, the interpreter will restore our SP (lest the
 705   // compiled code, which relies solely on SP and not FP, get sick).
 706 
 707   {
 708     __ block_comment("c2i_unverified_entry {");
 709     // Method might have been compiled since the call site was patched to
 710     // interpreted; if that is the case treat it as a miss so we can get
 711     // the call site corrected.
 712     __ ic_check(1 /* end_alignment */);
 713     __ ldr(rmethod, Address(data, CompiledICData::speculated_method_offset()));
 714 
 715     __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 716     __ cbz(rscratch1, skip_fixup);
 717     __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 718     __ block_comment("} c2i_unverified_entry");
 719   }
 720 
 721   entry_address[AdapterBlob::C2I] = __ pc();


 722 
 723   // Class initialization barrier for static methods
 724   entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
 725   if (VM_Version::supports_fast_class_init_checks()) {
 726     Label L_skip_barrier;
 727 
 728     { // Bypass the barrier for non-static methods
 729       __ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
 730       __ andsw(zr, rscratch1, JVM_ACC_STATIC);
 731       __ br(Assembler::EQ, L_skip_barrier); // non-static
 732     }
 733 
 734     __ load_method_holder(rscratch2, rmethod);
 735     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
 736     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 737 
 738     __ bind(L_skip_barrier);
 739     entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
 740   }
 741 
 742   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 743   bs->c2i_entry_barrier(masm);
 744 
 745   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 746   return;
 747 }
 748 
 749 static int c_calling_convention_priv(const BasicType *sig_bt,
 750                                          VMRegPair *regs,
 751                                          int total_args_passed) {
 752 
 753 // We return the amount of VMRegImpl stack slots we need to reserve for all
 754 // the arguments NOT counting out_preserve_stack_slots.
 755 
 756     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
 757       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5,  c_rarg6,  c_rarg7
 758     };
 759     static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
 760       c_farg0, c_farg1, c_farg2, c_farg3,
 761       c_farg4, c_farg5, c_farg6, c_farg7
 762     };
 763 
 764     uint int_args = 0;
 765     uint fp_args = 0;
 766     uint stk_args = 0; // inc by 2 each time

2687 
2688   // exception pending => remove activation and forward to exception handler
2689 
2690   __ str(zr, Address(rthread, JavaThread::vm_result_oop_offset()));
2691 
2692   __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
2693   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2694 
2695   // -------------
2696   // make sure all code is generated
2697   masm->flush();
2698 
2699   // return the  blob
2700   // frame_size_words or bytes??
2701   RuntimeStub* rs_blob = RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
2702 
2703   AOTCodeCache::store_code_blob(*rs_blob, AOTCodeEntry::SharedBlob, StubInfo::blob(id));
2704   return rs_blob;
2705 }
2706 
2707 // Continuation point for throwing of implicit exceptions that are
2708 // not handled in the current activation. Fabricates an exception
2709 // oop and initiates normal exception dispatching in this
2710 // frame. Since we need to preserve callee-saved values (currently
2711 // only for C2, but done for C1 as well) we need a callee-saved oop
2712 // map and therefore have to make these stubs into RuntimeStubs
2713 // rather than BufferBlobs.  If the compiler needs all registers to
2714 // be preserved between the fault point and the exception handler
2715 // then it must assume responsibility for that in
2716 // AbstractCompiler::continuation_for_implicit_null_exception or
2717 // continuation_for_implicit_division_by_zero_exception. All other
2718 // implicit exceptions (e.g., NullPointerException or
2719 // AbstractMethodError on entry) are either at call sites or
2720 // otherwise assume that stack unwinding will be initiated, so
2721 // caller saved registers were assumed volatile in the compiler.
2722 
2723 RuntimeStub* SharedRuntime::generate_throw_exception(StubId id, address runtime_entry) {
2724   assert(is_throw_id(id), "expected a throw stub id");
2725 
2726   const char* name = SharedRuntime::stub_name(id);

   1 /*
   2  * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
   4  * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "asm/macroAssembler.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "classfile/symbolTable.hpp"
  30 #include "code/aotCodeCache.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "code/compiledIC.hpp"
  33 #include "code/debugInfoRec.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/oopMap.hpp"
  36 #include "gc/shared/barrierSetAssembler.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "interpreter/interp_masm.hpp"
  39 #include "logging/log.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "nativeInst_aarch64.hpp"
  42 #include "oops/klass.inline.hpp"
  43 #include "oops/method.inline.hpp"
  44 #include "prims/methodHandles.hpp"
  45 #include "runtime/continuation.hpp"
  46 #include "runtime/continuationEntry.inline.hpp"
  47 #include "runtime/globals.hpp"
  48 #include "runtime/jniHandles.hpp"
  49 #include "runtime/safepointMechanism.hpp"

 191 
 192   int frame_size_in_bytes = align_up(additional_frame_words * wordSize +
 193                                      reg_save_size * BytesPerInt, 16);
 194   // OopMap frame size is in compiler stack slots (jint's) not bytes or words
 195   int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
 196   // The caller will allocate additional_frame_words
 197   int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
 198   // CodeBlob frame size is in words.
 199   int frame_size_in_words = frame_size_in_bytes / wordSize;
 200   *total_frame_words = frame_size_in_words;
 201 
 202   // Save Integer and Float registers.
 203   __ enter();
 204   __ push_CPU_state(_save_vectors, use_sve, sve_vector_size_in_bytes, total_predicate_in_bytes);
 205 
 206   // Set an oopmap for the call site.  This oopmap will map all
 207   // oop-registers and debug-info registers as callee-saved.  This
 208   // will allow deoptimization at this safepoint to find all possible
 209   // debug-info recordings, as well as let GC find all oops.
 210 

 211   OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
 212 
 213   for (int i = 0; i < Register::number_of_registers; i++) {
 214     Register r = as_Register(i);
 215     if (i <= rfp->encoding() && r != rscratch1 && r != rscratch2) {
 216       // SP offsets are in 4-byte words.
 217       // Register slots are 8 bytes wide, 32 floating-point registers.
 218       int sp_offset = Register::max_slots_per_register * i +
 219                       FloatRegister::save_slots_per_register * FloatRegister::number_of_registers;
 220       oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots), r->as_VMReg());
 221     }
 222   }
 223 
 224   for (int i = 0; i < FloatRegister::number_of_registers; i++) {
 225     FloatRegister r = as_FloatRegister(i);
 226     int sp_offset = 0;
 227     if (_save_vectors) {
 228       sp_offset = use_sve ? (total_predicate_in_slots + sve_vector_size_in_slots * i) :
 229                             (FloatRegister::slots_per_neon_register * i);
 230     } else {

 342       break;
 343     case T_DOUBLE:
 344       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 345       if (fp_args < Argument::n_float_register_parameters_j) {
 346         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 347       } else {
 348         stk_args = align_up(stk_args, 2);
 349         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 350         stk_args += 2;
 351       }
 352       break;
 353     default:
 354       ShouldNotReachHere();
 355       break;
 356     }
 357   }
 358 
 359   return stk_args;
 360 }
 361 
 362 
 363 const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j;
 364 const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
 365 
 366 int SharedRuntime::java_return_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) {
 367 
 368   // Create the mapping between argument positions and registers.
 369 
 370   static const Register INT_ArgReg[java_return_convention_max_int] = {
 371     r0 /* j_rarg7 */, j_rarg6, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0
 372   };
 373 
 374   static const FloatRegister FP_ArgReg[java_return_convention_max_float] = {
 375     j_farg0, j_farg1, j_farg2, j_farg3, j_farg4, j_farg5, j_farg6, j_farg7
 376   };
 377 
 378   uint int_args = 0;
 379   uint fp_args = 0;
 380 
 381   for (int i = 0; i < total_args_passed; i++) {
 382     switch (sig_bt[i]) {
 383     case T_BOOLEAN:
 384     case T_CHAR:
 385     case T_BYTE:
 386     case T_SHORT:
 387     case T_INT:
 388       if (int_args < SharedRuntime::java_return_convention_max_int) {
 389         regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
 390         int_args ++;
 391       } else {
 392         return -1;
 393       }
 394       break;
 395     case T_VOID:
 396       // halves of T_LONG or T_DOUBLE
 397       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 398       regs[i].set_bad();
 399       break;
 400     case T_LONG:
 401       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 402       // fall through
 403     case T_OBJECT:
 404     case T_ARRAY:
 405     case T_ADDRESS:
 406       // Should T_METADATA be added to java_calling_convention as well ?
 407     case T_METADATA:
 408       if (int_args < SharedRuntime::java_return_convention_max_int) {
 409         regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
 410         int_args ++;
 411       } else {
 412         return -1;
 413       }
 414       break;
 415     case T_FLOAT:
 416       if (fp_args < SharedRuntime::java_return_convention_max_float) {
 417         regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
 418         fp_args ++;
 419       } else {
 420         return -1;
 421       }
 422       break;
 423     case T_DOUBLE:
 424       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 425       if (fp_args < SharedRuntime::java_return_convention_max_float) {
 426         regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
 427         fp_args ++;
 428       } else {
 429         return -1;
 430       }
 431       break;
 432     default:
 433       ShouldNotReachHere();
 434       break;
 435     }
 436   }
 437 
 438   return int_args + fp_args;
 439 }
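// For example, a scalarized return value with an int field and a double field
// maps the int to r0 (INT_ArgReg[0]) and the double to j_farg0, and the function
// returns 2; if the fields do not all fit in the return registers it returns -1.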
 440 
 441 // Patch the callers callsite with entry to compiled code if it exists.
 442 static void patch_callers_callsite(MacroAssembler *masm) {
 443   Label L;
 444   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 445   __ cbz(rscratch1, L);
 446 
 447   __ enter();
 448   __ push_CPU_state();
 449 
 450   // VM needs caller's callsite
 451   // VM needs target method
 452   // This needs to be a long call since we will relocate this adapter to
 453   // the codeBuffer and it may not reach
 454 
 455 #ifndef PRODUCT
 456   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
 457 #endif
 458 
 459   __ mov(c_rarg0, rmethod);
 460   __ mov(c_rarg1, lr);
 461   __ authenticate_return_address(c_rarg1);
 462   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 463   __ blr(rscratch1);
 464 
 465   // Explicit isb required because fixup_callers_callsite may change the code
 466   // stream.
 467   __ safepoint_isb();
 468 
 469   __ pop_CPU_state();
 470   // restore sp
 471   __ leave();
 472   __ bind(L);
 473 }
 474 
 475 // For each inline type argument, sig includes the list of fields of
 476 // the inline type. This utility function computes the number of
 477 // arguments for the call if inline types are passed by reference (the
 478 // calling convention the interpreter expects).
 479 static int compute_total_args_passed_int(const GrowableArray<SigEntry>* sig_extended) {
 480   int total_args_passed = 0;
 481   if (InlineTypePassFieldsAsArgs) {
 482     for (int i = 0; i < sig_extended->length(); i++) {
 483       BasicType bt = sig_extended->at(i)._bt;
 484       if (bt == T_METADATA) {
 485         // In sig_extended, an inline type argument starts with:
 486         // T_METADATA, followed by the types of the fields of the
 487         // inline type and T_VOID to mark the end of the value
 488         // type. Inline types are flattened so, for instance, in the
 489         // case of an inline type with an int field and an inline type
 490         // field that itself has 2 fields, an int and a long:
 491         // T_METADATA T_INT T_METADATA T_INT T_LONG T_VOID (second
 492         // slot for the T_LONG) T_VOID (inner inline type) T_VOID
 493         // (outer inline type)
 494         total_args_passed++;
 495         int vt = 1;
 496         do {
 497           i++;
 498           BasicType bt = sig_extended->at(i)._bt;
 499           BasicType prev_bt = sig_extended->at(i-1)._bt;
 500           if (bt == T_METADATA) {
 501             vt++;
 502           } else if (bt == T_VOID &&
 503                      prev_bt != T_LONG &&
 504                      prev_bt != T_DOUBLE) {
 505             vt--;
 506           }
 507         } while (vt != 0);
 508       } else {
 509         total_args_passed++;
 510       }
 511     }
 512   } else {
 513     total_args_passed = sig_extended->length();
 514   }
 515   return total_args_passed;
 516 }
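// Using the example above: an inline type with an int field and a nested inline
// type holding an int and a long appears in sig_extended as the eight entries
// T_METADATA T_INT T_METADATA T_INT T_LONG T_VOID T_VOID T_VOID, yet it counts
// as a single interpreter argument, since the interpreter receives the buffered
// inline type by reference.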
 517 
 518 
 519 static void gen_c2i_adapter_helper(MacroAssembler* masm,
 520                                    BasicType bt,
 521                                    BasicType prev_bt,
 522                                    size_t size_in_bytes,
 523                                    const VMRegPair& reg_pair,
 524                                    const Address& to,
 525                                    Register tmp1,
 526                                    Register tmp2,
 527                                    Register tmp3,
 528                                    int extraspace,
 529                                    bool is_oop) {
 530   if (bt == T_VOID) {
 531     assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
 532     return;
 533   }
 534 
 535   // Say 4 args:
 536   // i   st_off
 537   // 0   32 T_LONG
 538   // 1   24 T_VOID
 539   // 2   16 T_OBJECT
 540   // 3    8 T_BOOL
 541   // -    0 return address
 542   //
  543   // However, to make things extra confusing: because we can fit a Java long/double in
  544   // a single slot on a 64-bit VM, and it would be silly to break them up, the interpreter
  545   // leaves one slot empty and only stores to a single slot. In this case the
  546   // slot that is occupied is the T_VOID slot. See, I said it was confusing.
 547 
 548   bool wide = (size_in_bytes == wordSize);
 549   VMReg r_1 = reg_pair.first();
 550   VMReg r_2 = reg_pair.second();
 551   assert(r_2->is_valid() == wide, "invalid size");
 552   if (!r_1->is_valid()) {
 553     assert(!r_2->is_valid(), "");
 554     return;
 555   }
 556 
 557   if (!r_1->is_FloatRegister()) {
 558     Register val = r25;
 559     if (r_1->is_stack()) {
 560       // memory to memory use r25 (scratch registers is used by store_heap_oop)
 561       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 562       __ load_sized_value(val, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
 563     } else {
 564       val = r_1->as_Register();
 565     }
 566     assert_different_registers(to.base(), val, tmp1, tmp2, tmp3);
 567     if (is_oop) {
 568       // store_heap_oop transitively calls oop_store_at which corrupts to.base(). We need to keep it valid.
 569       __ push(to.base(), sp);
 570       __ store_heap_oop(to, val, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
 571       __ pop(to.base(), sp);
 572     } else {
 573       __ store_sized_value(to, val, size_in_bytes);
 574     }
 575   } else {
 576     if (wide) {
 577       __ strd(r_1->as_FloatRegister(), to);
 578     } else {
 579       // only a float use just part of the slot
 580       __ strs(r_1->as_FloatRegister(), to);
 581     }
 582   }
 583 }
 584 
 585 static void gen_c2i_adapter(MacroAssembler *masm,
 586                             const GrowableArray<SigEntry>* sig_extended,


 587                             const VMRegPair *regs,
 588                             bool requires_clinit_barrier,
 589                             address& c2i_no_clinit_check_entry,
 590                             Label& skip_fixup,
 591                             address start,
 592                             OopMapSet* oop_maps,
 593                             int& frame_complete,
 594                             int& frame_size_in_words,
 595                             bool alloc_inline_receiver) {
 596   if (requires_clinit_barrier && VM_Version::supports_fast_class_init_checks()) {
 597     Label L_skip_barrier;
 598 
 599     { // Bypass the barrier for non-static methods
 600       __ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
 601       __ andsw(zr, rscratch1, JVM_ACC_STATIC);
 602       __ br(Assembler::EQ, L_skip_barrier); // non-static
 603     }
 604 
 605     __ load_method_holder(rscratch2, rmethod);
 606     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
 607     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 608 
 609     __ bind(L_skip_barrier);
 610     c2i_no_clinit_check_entry = __ pc();
 611   }
 612 
 613   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 614   bs->c2i_entry_barrier(masm);
 615 
 616   // Before we get into the guts of the C2I adapter, see if we should be here
 617   // at all.  We've come from compiled code and are attempting to jump to the
 618   // interpreter, which means the caller made a static call to get here
 619   // (vcalls always get a compiled target if there is one).  Check for a
 620   // compiled target.  If there is one, we need to patch the caller's call.
 621   patch_callers_callsite(masm);
 622 
 623   __ bind(skip_fixup);
 624 
 625   // Name some registers to be used in the following code. We can use
 626   // anything except r0-r7 which are arguments in the Java calling
 627   // convention, rmethod (r12), and r19 which holds the outgoing sender
 628   // SP for the interpreter.
 629   Register buf_array = r10;   // Array of buffered inline types
 630   Register buf_oop = r11;     // Buffered inline type oop
 631   Register tmp1 = r15;
 632   Register tmp2 = r16;
 633   Register tmp3 = r17;
 634 
  635 #ifdef ASSERT
 636   RegSet clobbered_gp_regs = MacroAssembler::call_clobbered_gp_registers();
 637   assert(clobbered_gp_regs.contains(buf_array), "buf_array must be saved explicitly if it's not a clobber");
 638   assert(clobbered_gp_regs.contains(buf_oop), "buf_oop must be saved explicitly if it's not a clobber");
 639   assert(clobbered_gp_regs.contains(tmp1), "tmp1 must be saved explicitly if it's not a clobber");
 640   assert(clobbered_gp_regs.contains(tmp2), "tmp2 must be saved explicitly if it's not a clobber");
 641   assert(clobbered_gp_regs.contains(tmp3), "tmp3 must be saved explicitly if it's not a clobber");
 642 #endif
 643 
 644   if (InlineTypePassFieldsAsArgs) {
 645     // Is there an inline type argument?
 646     bool has_inline_argument = false;
 647     for (int i = 0; i < sig_extended->length() && !has_inline_argument; i++) {
 648       has_inline_argument = (sig_extended->at(i)._bt == T_METADATA);
 649     }
 650     if (has_inline_argument) {
 651       // There is at least an inline type argument: we're coming from
 652       // compiled code so we have no buffers to back the inline types
 653       // Allocate the buffers here with a runtime call.
 654       RegisterSaver reg_save(true /* save_vectors */);
 655       OopMap* map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
 656 
 657       frame_complete = __ offset();
 658       address the_pc = __ pc();
 659 
 660       Label retaddr;
 661       __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
 662 
 663       __ mov(c_rarg0, rthread);
 664       __ mov(c_rarg1, rmethod);
 665       __ mov(c_rarg2, (int64_t)alloc_inline_receiver);
 666 
 667       __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_inline_types)));
 668       __ blr(rscratch1);
 669       __ bind(retaddr);
 670 
 671       oop_maps->add_gc_map(__ pc() - start, map);
 672       __ reset_last_Java_frame(false);
 673 
 674       reg_save.restore_live_registers(masm);
 675 
 676       Label no_exception;
 677       __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
 678       __ cbz(rscratch1, no_exception);
 679 
 680       __ str(zr, Address(rthread, JavaThread::vm_result_oop_offset()));
 681       __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
 682       __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
 683 
 684       __ bind(no_exception);
 685 
 686       // We get an array of objects from the runtime call
 687       __ get_vm_result_oop(buf_array, rthread);
 688       __ get_vm_result_metadata(rmethod, rthread); // TODO: required to keep the callee Method live?
 689     }
 690   }
 691 
 692   // Since all args are passed on the stack, total_args_passed *
 693   // Interpreter::stackElementSize is the space we need.
 694 
 695   int total_args_passed = compute_total_args_passed_int(sig_extended);
 696   int extraspace = total_args_passed * Interpreter::stackElementSize;
 697 
 698   // stack is aligned, keep it that way
 699   extraspace = align_up(extraspace, StackAlignmentInBytes);
 700 
 701   // set senderSP value
 702   __ mov(r19_sender_sp, sp);
 703 
 704   __ sub(sp, sp, extraspace);
 705 
 706   // Now write the args into the outgoing interpreter space
 707 
 708   // next_arg_comp is the next argument from the compiler point of
 709   // view (inline type fields are passed in registers/on the stack). In
 710   // sig_extended, an inline type argument starts with: T_METADATA,
 711   // followed by the types of the fields of the inline type and T_VOID
 712   // to mark the end of the inline type. ignored counts the number of
 713   // T_METADATA/T_VOID. next_vt_arg is the next inline type argument:
 714   // used to get the buffer for that argument from the pool of buffers
 715   // we allocated above and want to pass to the
 716   // interpreter. next_arg_int is the next argument from the
 717   // interpreter point of view (inline types are passed by reference).
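  // For example, a single inline type argument with one int field shows up in
  // sig_extended as T_METADATA T_INT T_VOID: the int field travels in regs[0]
  // and is stored into the freshly allocated buffer below, while the buffer oop
  // itself is what ends up in the interpreter's argument slot (ignored ends up
  // at 2, accounting for the T_METADATA and the closing T_VOID).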
 718   for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
 719        next_arg_comp < sig_extended->length(); next_arg_comp++) {
 720     assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
 721     assert(next_arg_int <= total_args_passed, "more arguments for the interpreter than expected?");
 722     BasicType bt = sig_extended->at(next_arg_comp)._bt;
 723     int st_off = (total_args_passed - next_arg_int - 1) * Interpreter::stackElementSize;
 724     if (!InlineTypePassFieldsAsArgs || bt != T_METADATA) {
 725       int next_off = st_off - Interpreter::stackElementSize;
 726       const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
 727       const VMRegPair reg_pair = regs[next_arg_comp-ignored];
 728       size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
 729       gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
 730                              size_in_bytes, reg_pair, Address(sp, offset), tmp1, tmp2, tmp3, extraspace, false);
 731       next_arg_int++;
 732 #ifdef ASSERT
 733       if (bt == T_LONG || bt == T_DOUBLE) {
 734         // Overwrite the unused slot with known junk
 735         __ mov(rscratch1, CONST64(0xdeadffffdeadaaaa));
 736         __ str(rscratch1, Address(sp, st_off));
 737       }
 738 #endif /* ASSERT */
 739     } else {
 740       ignored++;
 741       // get the buffer from the just allocated pool of buffers
 742       int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_OBJECT);
 743       __ load_heap_oop(buf_oop, Address(buf_array, index), tmp1, tmp2);
 744       next_vt_arg++; next_arg_int++;
 745       int vt = 1;
 746       // write fields we get from compiled code in registers/stack
 747       // slots to the buffer: we know we are done with that inline type
 748       // argument when we hit the T_VOID that acts as an end of inline
 749       // type delimiter for this inline type. Inline types are flattened
 750       // so we might encounter embedded inline types. Each entry in
 751       // sig_extended contains a field offset in the buffer.
 752       Label L_null;
 753       do {
 754         next_arg_comp++;
 755         BasicType bt = sig_extended->at(next_arg_comp)._bt;
 756         BasicType prev_bt = sig_extended->at(next_arg_comp - 1)._bt;
 757         if (bt == T_METADATA) {
 758           vt++;
 759           ignored++;
 760         } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
 761           vt--;
 762           ignored++;
 763         } else {
 764           int off = sig_extended->at(next_arg_comp)._offset;
 765           if (off == -1) {
 766             // Nullable inline type argument, emit null check
 767             VMReg reg = regs[next_arg_comp-ignored].first();
 768             Label L_notNull;
 769             if (reg->is_stack()) {
 770               int ld_off = reg->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 771               __ ldrb(tmp1, Address(sp, ld_off));
 772               __ cbnz(tmp1, L_notNull);
 773             } else {
 774               __ cbnz(reg->as_Register(), L_notNull);
 775             }
 776             __ str(zr, Address(sp, st_off));
 777             __ b(L_null);
 778             __ bind(L_notNull);
 779             continue;
 780           }
 781           assert(off > 0, "offset in object should be positive");
 782           size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 783           bool is_oop = is_reference_type(bt);
 784           gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
 785                                  size_in_bytes, regs[next_arg_comp-ignored], Address(buf_oop, off), tmp1, tmp2, tmp3, extraspace, is_oop);
 786         }
 787       } while (vt != 0);
 788       // pass the buffer to the interpreter
 789       __ str(buf_oop, Address(sp, st_off));
 790       __ bind(L_null);
 791     }
 792   }
 793 
 794   __ mov(esp, sp); // Interp expects args on caller's expression stack
 795 
 796   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
 797   __ br(rscratch1);
 798 }
 799 
 800 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, int comp_args_on_stack, const GrowableArray<SigEntry>* sig, const VMRegPair *regs) {
 801 
 802 
 803   // Note: r19_sender_sp contains the senderSP on entry. We must
 804   // preserve it since we may do a i2c -> c2i transition if we lose a
 805   // race where compiled code goes non-entrant while we get args
 806   // ready.
 807 
 808   // Adapters are frameless.
 809 
 810   // An i2c adapter is frameless because the *caller* frame, which is
 811   // interpreted, routinely repairs its own esp (from
 812   // interpreter_frame_last_sp), even if a callee has modified the
 813   // stack pointer.  It also recalculates and aligns sp.
 814 
 815   // A c2i adapter is frameless because the *callee* frame, which is
 816   // interpreted, routinely repairs its caller's sp (from sender_sp,
 817   // which is set up via the senderSP register).
 818 
 819   // In other words, if *either* the caller or callee is interpreted, we can
 820   // get the stack pointer repaired after a call.
 821 
 822   // This is why c2i and i2c adapters cannot be indefinitely composed.
 823   // In particular, if a c2i adapter were to somehow call an i2c adapter,
 824   // both caller and callee would be compiled methods, and neither would
 825   // clean up the stack pointer changes performed by the two adapters.
 826   // If this happens, control eventually transfers back to the compiled
 827   // caller, but with an uncorrected stack, causing delayed havoc.
 828 
 829   // Cut-out for having no stack args.
 830   int comp_words_on_stack = 0;
 831   if (comp_args_on_stack) {
 832      comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord;
 833      __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
 834      __ andr(sp, rscratch1, -16);
 835   }
 836 
 837   // Will jump to the compiled code just as if compiled code was doing it.
 838   // Pre-load the register-jump target early, to schedule it better.
 839   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_inline_offset())));
 840 
 841 #if INCLUDE_JVMCI
 842   if (EnableJVMCI) {
 843     // check if this call should be routed towards a specific entry point
 844     __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 845     Label no_alternative_target;
 846     __ cbz(rscratch2, no_alternative_target);
 847     __ mov(rscratch1, rscratch2);
 848     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 849     __ bind(no_alternative_target);
 850   }
 851 #endif // INCLUDE_JVMCI
 852 
 853   int total_args_passed = sig->length();
 854 
 855   // Now generate the shuffle code.
 856   for (int i = 0; i < total_args_passed; i++) {
 857     BasicType bt = sig->at(i)._bt;
 858     if (bt == T_VOID) {
 859       assert(i > 0 && (sig->at(i - 1)._bt == T_LONG || sig->at(i - 1)._bt == T_DOUBLE), "missing half");
 860       continue;
 861     }
 862 
 863     // Pick up 0, 1 or 2 words from SP+offset.
 864     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?");
 865 


 866     // Load in argument order going down.
 867     int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
 868     // Point to interpreter value (vs. tag)
 869     int next_off = ld_off - Interpreter::stackElementSize;
 870     //
 871     //
 872     //
 873     VMReg r_1 = regs[i].first();
 874     VMReg r_2 = regs[i].second();
 875     if (!r_1->is_valid()) {
 876       assert(!r_2->is_valid(), "");
 877       continue;
 878     }
 879     if (r_1->is_stack()) {
 880       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 881       int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size;
 882       if (!r_2->is_valid()) {
 883         // sign extend???
 884         __ ldrsw(rscratch2, Address(esp, ld_off));
 885         __ str(rscratch2, Address(sp, st_off));
 886       } else {
 887         //
 888         // We are using two optoregs. This can be either T_OBJECT,
  889         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
  890         // two slots but only uses one for the T_LONG or T_DOUBLE case,
  891         // so we must adjust where to pick up the data to match the
 892         // interpreter.
 893         //
 894         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
 895         // are accessed as negative so LSW is at LOW address
 896 
 897         // ld_off is MSW so get LSW
 898         const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;

 899         __ ldr(rscratch2, Address(esp, offset));
 900         // st_off is LSW (i.e. reg.first())
  901         __ str(rscratch2, Address(sp, st_off));
  902       }
  903     } else if (r_1->is_Register()) {  // Register argument
  904       Register r = r_1->as_Register();
  905       if (r_2->is_valid()) {
  906         //
  907         // We are using two VMRegs. This can be either T_OBJECT,
  908         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
  909         // two slots but only uses one for the T_LONG or T_DOUBLE case,
  910         // so we must adjust where to pick up the data to match the
  911         // interpreter.
  912 
  913         const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
  914 
  915         // this can be a misaligned move
  916         __ ldr(r, Address(esp, offset));
  917       } else {
  918         // sign extend and use a full word?
  919         __ ldrw(r, Address(esp, ld_off));
  920       }
  921     } else {
  922       if (!r_2->is_valid()) {
  923         __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
  924       } else {
  925         __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
  926       }
  927     }
  928   }
 929 
 930 
 931   __ mov(rscratch2, rscratch1);
 932   __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
 933   __ mov(rscratch1, rscratch2);
 934 
 935   // 6243940 We might end up in handle_wrong_method if
 936   // the callee is deoptimized as we race thru here. If that
 937   // happens we don't want to take a safepoint because the
 938   // caller frame will look interpreted and arguments are now
 939   // "compiled" so it is much better to make this transition
 940   // invisible to the stack walking code. Unfortunately if
 941   // we try and find the callee by normal means a safepoint
 942   // is possible. So we stash the desired callee in the thread
  943   // and the VM will find it there should this case occur.
 944 
 945   __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));

 946   __ br(rscratch1);
 947 }
 948 
 949 static void gen_inline_cache_check(MacroAssembler *masm, Label& skip_fixup) {
 950   Register data = rscratch2;
 951   __ ic_check(1 /* end_alignment */);
 952   __ ldr(rmethod, Address(data, CompiledICData::speculated_method_offset()));
 953 
 954   // Method might have been compiled since the call site was patched to
 955   // interpreted; if that is the case treat it as a miss so we can get
 956   // the call site corrected.
 957   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 958   __ cbz(rscratch1, skip_fixup);
 959   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 960 }
 961 
 962 // ---------------------------------------------------------------
 963 void SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,
 964                                             int comp_args_on_stack,
 965                                             const GrowableArray<SigEntry>* sig,
 966                                             const VMRegPair* regs,
 967                                             const GrowableArray<SigEntry>* sig_cc,
 968                                             const VMRegPair* regs_cc,
 969                                             const GrowableArray<SigEntry>* sig_cc_ro,
 970                                             const VMRegPair* regs_cc_ro,
 971                                             address entry_address[AdapterBlob::ENTRY_COUNT],
 972                                             AdapterBlob*& new_adapter,
 973                                             bool allocate_code_blob) {
 974 
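       // Generate the I2C adapter: incoming arguments are laid out for the
       // interpreter (relative to esp) and must be shuffled into the compiled
       // layout (registers and stack slots relative to sp).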
 975   entry_address[AdapterBlob::I2C] = __ pc();
 976   gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);
 977 
 978   // -------------------------------------------------------------------------
 979   // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
 980   // to the interpreter.  The args start out packed in the compiled layout.  They
 981   // need to be unpacked into the interpreter layout.  This will almost always
 982   // require some stack space.  We grow the current (compiled) stack, then repack
 983   // the args.  We finally end in a jump to the generic interpreter entry point.
 984   // On exit from the interpreter, the interpreter will restore our SP (lest the
 985   // compiled code, which relies solely on SP and not FP, get sick).
 986 
 987   entry_address[AdapterBlob::C2I_Unverified] = __ pc();
 988   entry_address[AdapterBlob::C2I_Unverified_Inline] = __ pc();
 989   Label skip_fixup;
 990 
 991   gen_inline_cache_check(masm, skip_fixup);
 992 
 993   OopMapSet* oop_maps = new OopMapSet();
 994   int frame_complete = CodeOffsets::frame_never_safe;
 995   int frame_size_in_words = 0;
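       // frame_complete, frame_size_in_words and oop_maps are filled in by the
       // gen_c2i_adapter calls below and passed to AdapterBlob::create; the c2i
       // adapters may safepoint, so their frames must be described by oop maps.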
 996 
 997   // Scalarized c2i adapter with non-scalarized receiver (i.e., don't pack receiver)
 998   entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
 999   entry_address[AdapterBlob::C2I_Inline_RO] = __ pc();
1000   if (regs_cc != regs_cc_ro) {
1001     // No class init barrier needed because method is guaranteed to be non-static
1002     gen_c2i_adapter(masm, sig_cc_ro, regs_cc_ro, /* requires_clinit_barrier = */ false, entry_address[AdapterBlob::C2I_No_Clinit_Check],
1003                     skip_fixup, entry_address[AdapterBlob::I2C], oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1004     skip_fixup.reset();
1005   }
1006 
1007   // Scalarized c2i adapter
1008   entry_address[AdapterBlob::C2I]        = __ pc();
1009   entry_address[AdapterBlob::C2I_Inline] = __ pc();
1010   gen_c2i_adapter(masm, sig_cc, regs_cc, /* requires_clinit_barrier = */ true, entry_address[AdapterBlob::C2I_No_Clinit_Check],
1011                   skip_fixup, entry_address[AdapterBlob::I2C], oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ true);
1012 
1013   // Non-scalarized c2i adapter
1014   if (regs != regs_cc) {
1015     entry_address[AdapterBlob::C2I_Unverified_Inline] = __ pc();
1016     Label inline_entry_skip_fixup;
1017     gen_inline_cache_check(masm, inline_entry_skip_fixup);
1018 
1019     entry_address[AdapterBlob::C2I_Inline] = __ pc();
1020     gen_c2i_adapter(masm, sig, regs, /* requires_clinit_barrier = */ true, entry_address[AdapterBlob::C2I_No_Clinit_Check],
1021                     inline_entry_skip_fixup, entry_address[AdapterBlob::I2C], oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1022   }
1023 
1024   // The c2i adapters might safepoint and trigger a GC. The caller must make sure that
1025   // the GC knows about the location of oop argument locations passed to the c2i adapter.
1026   if (allocate_code_blob) {
1027     bool caller_must_gc_arguments = (regs != regs_cc);
1028     int entry_offset[AdapterHandlerEntry::ENTRIES_COUNT];
1029     assert(AdapterHandlerEntry::ENTRIES_COUNT == 7, "sanity");
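         // Convert the absolute entry addresses into offsets so they can be
         // stored in the AdapterBlob.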
1030     AdapterHandlerLibrary::address_to_offset(entry_address, entry_offset);
1031     new_adapter = AdapterBlob::create(masm->code(), entry_offset, frame_complete, frame_size_in_words, oop_maps, caller_must_gc_arguments);
1032   }
1033 }
1034 
1035 static int c_calling_convention_priv(const BasicType *sig_bt,
1036                                          VMRegPair *regs,
1037                                          int total_args_passed) {
1038 
1039 // We return the number of VMRegImpl stack slots we need to reserve for all
1040 // the arguments, NOT counting out_preserve_stack_slots.
1041 
1042     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1043       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5,  c_rarg6,  c_rarg7
1044     };
1045     static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1046       c_farg0, c_farg1, c_farg2, c_farg3,
1047       c_farg4, c_farg5, c_farg6, c_farg7
1048     };
1049 
1050     uint int_args = 0;
1051     uint fp_args = 0;
1052     uint stk_args = 0; // inc by 2 each time

2973 
2974   // exception pending => remove activation and forward to exception handler
2975 
2976   __ str(zr, Address(rthread, JavaThread::vm_result_oop_offset()));
2977 
2978   __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
2979   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2980 
2981   // -------------
2982   // make sure all code is generated
2983   masm->flush();
2984 
2985   // return the blob
2986   // the frame size passed to new_runtime_stub is in words (CodeBlob frame sizes are in words)
2987   RuntimeStub* rs_blob = RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
2988 
2989   AOTCodeCache::store_code_blob(*rs_blob, AOTCodeEntry::SharedBlob, StubInfo::blob(id));
2990   return rs_blob;
2991 }
2992 
2993 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
2994   BufferBlob* buf = BufferBlob::create("inline types pack/unpack", 16 * K);
2995   if (buf == nullptr) {
2996     return nullptr;
2997   }
2998   CodeBuffer buffer(buf);
2999   short buffer_locs[20];
3000   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3001                                          sizeof(buffer_locs)/sizeof(relocInfo));
3002 
3003   MacroAssembler _masm(&buffer);
3004   MacroAssembler* masm = &_masm;
3005 
3006   const Array<SigEntry>* sig_vk = vk->extended_sig();
3007   const Array<VMRegPair>* regs = vk->return_regs();
3008 
3009   int pack_fields_jobject_off = __ offset();
3010   // Resolve pre-allocated buffer from JNI handle.
3011   // We cannot do this in generate_call_stub() because it requires GC code to be initialized.
3012   Register Rresult = r14;  // See StubGenerator::generate_call_stub().
3013   __ ldr(r0, Address(Rresult));
3014   __ resolve_jobject(r0 /* value */,
3015                      rthread /* thread */,
3016                      r12 /* tmp */);
3017   __ str(r0, Address(Rresult));
3018 
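       // Pack entry: the field values arrive in registers according to the
       // scalarized return convention and are stored into the pre-allocated
       // buffer pointed to by r0.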
3019   int pack_fields_off = __ offset();
3020 
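       // Field registers start at index 1; regs->at(0) is the slot for the
       // inline type oop itself.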
3021   int j = 1;
3022   for (int i = 0; i < sig_vk->length(); i++) {
3023     BasicType bt = sig_vk->at(i)._bt;
3024     if (bt == T_METADATA) {
3025       continue;
3026     }
3027     if (bt == T_VOID) {
3028       if (sig_vk->at(i-1)._bt == T_LONG ||
3029           sig_vk->at(i-1)._bt == T_DOUBLE) {
3030         j++;
3031       }
3032       continue;
3033     }
3034     int off = sig_vk->at(i)._offset;
3035     VMRegPair pair = regs->at(j);
3036     VMReg r_1 = pair.first();
3037     VMReg r_2 = pair.second();
3038     Address to(r0, off);
3039     if (bt == T_FLOAT) {
3040       __ strs(r_1->as_FloatRegister(), to);
3041     } else if (bt == T_DOUBLE) {
3042       __ strd(r_1->as_FloatRegister(), to);
3043     } else {
3044       Register val = r_1->as_Register();
3045       assert_different_registers(to.base(), val, r15, r16, r17);
3046       if (is_reference_type(bt)) {
3047         // store_heap_oop transitively calls oop_store_at which corrupts to.base(). We need to keep r0 valid.
3048         __ mov(r17, r0);
3049         Address to_with_r17(r17, off);
3050         __ store_heap_oop(to_with_r17, val, r15, r16, r17, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
3051       } else {
3052         __ store_sized_value(to, r_1->as_Register(), type2aelembytes(bt));
3053       }
3054     }
3055     j++;
3056   }
3057   assert(j == regs->length(), "missed a field?");
3058   if (vk->has_nullable_atomic_layout()) {
3059     // Zero the null marker (setting it to 1 would be better but would require an additional register)
3060     __ strb(zr, Address(r0, vk->null_marker_offset()));
3061   }
3062   __ ret(lr);
3063 
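       // Unpack entry: r0 holds the buffered inline type oop (possibly null);
       // load the field values back into the return registers.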
3064   int unpack_fields_off = __ offset();
3065 
3066   Label skip;
3067   Label not_null;
3068   __ cbnz(r0, not_null);
3069 
3070   // Return value is null. Zero oop registers to make the GC happy.
3071   j = 1;
3072   for (int i = 0; i < sig_vk->length(); i++) {
3073     BasicType bt = sig_vk->at(i)._bt;
3074     if (bt == T_METADATA) {
3075       continue;
3076     }
3077     if (bt == T_VOID) {
3078       if (sig_vk->at(i-1)._bt == T_LONG ||
3079           sig_vk->at(i-1)._bt == T_DOUBLE) {
3080         j++;
3081       }
3082       continue;
3083     }
3084     if (bt == T_OBJECT || bt == T_ARRAY) {
3085       VMRegPair pair = regs->at(j);
3086       VMReg r_1 = pair.first();
3087       __ mov(r_1->as_Register(), zr);
3088     }
3089     j++;
3090   }
3091   __ b(skip);
3092   __ bind(not_null);
3093 
3094   j = 1;
3095   for (int i = 0; i < sig_vk->length(); i++) {
3096     BasicType bt = sig_vk->at(i)._bt;
3097     if (bt == T_METADATA) {
3098       continue;
3099     }
3100     if (bt == T_VOID) {
3101       if (sig_vk->at(i-1)._bt == T_LONG ||
3102           sig_vk->at(i-1)._bt == T_DOUBLE) {
3103         j++;
3104       }
3105       continue;
3106     }
3107     int off = sig_vk->at(i)._offset;
3108     assert(off > 0, "offset in object should be positive");
3109     VMRegPair pair = regs->at(j);
3110     VMReg r_1 = pair.first();
3111     VMReg r_2 = pair.second();
3112     Address from(r0, off);
3113     if (bt == T_FLOAT) {
3114       __ ldrs(r_1->as_FloatRegister(), from);
3115     } else if (bt == T_DOUBLE) {
3116       __ ldrd(r_1->as_FloatRegister(), from);
3117     } else if (bt == T_OBJECT || bt == T_ARRAY) {
3118       assert_different_registers(r0, r_1->as_Register());
3119       __ load_heap_oop(r_1->as_Register(), from, rscratch1, rscratch2);
3120     } else {
3121       assert(is_java_primitive(bt), "unexpected basic type");
3122       assert_different_registers(r0, r_1->as_Register());
3123       size_t size_in_bytes = type2aelembytes(bt);
3124       __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
3125     }
3126     j++;
3127   }
3128   assert(j == regs->length(), "missed a field?");
3129 
3130   __ bind(skip);
3131 
3132   __ ret(lr);
3133 
3134   __ flush();
3135 
3136   return BufferedInlineTypeBlob::create(&buffer, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
3137 }
3138 
3139 // Continuation point for throwing of implicit exceptions that are
3140 // not handled in the current activation. Fabricates an exception
3141 // oop and initiates normal exception dispatching in this
3142 // frame. Since we need to preserve callee-saved values (currently
3143 // only for C2, but done for C1 as well) we need a callee-saved oop
3144 // map and therefore have to make these stubs into RuntimeStubs
3145 // rather than BufferBlobs.  If the compiler needs all registers to
3146 // be preserved between the fault point and the exception handler
3147 // then it must assume responsibility for that in
3148 // AbstractCompiler::continuation_for_implicit_null_exception or
3149 // continuation_for_implicit_division_by_zero_exception. All other
3150 // implicit exceptions (e.g., NullPointerException or
3151 // AbstractMethodError on entry) are either at call sites or
3152 // otherwise assume that stack unwinding will be initiated, so
3153 // caller saved registers were assumed volatile in the compiler.
3154 
3155 RuntimeStub* SharedRuntime::generate_throw_exception(StubId id, address runtime_entry) {
3156   assert(is_throw_id(id), "expected a throw stub id");
3157 
3158   const char* name = SharedRuntime::stub_name(id);
< prev index next >