src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "asm/macroAssembler.hpp"
  28 #include "asm/macroAssembler.inline.hpp"

  29 #include "code/codeCache.hpp"
  30 #include "code/compiledIC.hpp"
  31 #include "code/debugInfoRec.hpp"
  32 #include "code/vtableStubs.hpp"
  33 #include "compiler/oopMap.hpp"
  34 #include "gc/shared/barrierSetAssembler.hpp"
  35 #include "interpreter/interpreter.hpp"
  36 #include "interpreter/interp_masm.hpp"
  37 #include "logging/log.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "nativeInst_aarch64.hpp"
  40 #include "oops/klass.inline.hpp"
  41 #include "oops/method.inline.hpp"
  42 #include "prims/methodHandles.hpp"
  43 #include "runtime/continuation.hpp"
  44 #include "runtime/continuationEntry.inline.hpp"
  45 #include "runtime/globals.hpp"
  46 #include "runtime/jniHandles.hpp"
  47 #include "runtime/safepointMechanism.hpp"
  48 #include "runtime/sharedRuntime.hpp"

 341       break;
 342     case T_DOUBLE:
 343       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 344       if (fp_args < Argument::n_float_register_parameters_j) {
 345         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 346       } else {
 347         stk_args = align_up(stk_args, 2);
 348         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 349         stk_args += 2;
 350       }
 351       break;
 352     default:
 353       ShouldNotReachHere();
 354       break;
 355     }
 356   }
 357 
 358   return stk_args;
 359 }
 360 
 361 // Patch the caller's callsite with entry to compiled code if it exists.
 362 static void patch_callers_callsite(MacroAssembler *masm) {
 363   Label L;
 364   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 365   __ cbz(rscratch1, L);
 366 
 367   __ enter();
 368   __ push_CPU_state();
 369 
 370   // VM needs caller's callsite
 371   // VM needs target method
 372   // This needs to be a long call since we will relocate this adapter to
 373   // the codeBuffer and it may not reach
 374 
 375 #ifndef PRODUCT
 376   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
 377 #endif
 378 
 379   __ mov(c_rarg0, rmethod);
 380   __ mov(c_rarg1, lr);
 381   __ authenticate_return_address(c_rarg1);
 382   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 383   __ blr(rscratch1);
 384 
 385   // Explicit isb required because fixup_callers_callsite may change the code
 386   // stream.
 387   __ safepoint_isb();
 388 
 389   __ pop_CPU_state();
 390   // restore sp
 391   __ leave();
 392   __ bind(L);
 393 }
 394 
 395 static void gen_c2i_adapter(MacroAssembler *masm,
 396                             int total_args_passed,
 397                             int comp_args_on_stack,
 398                             const BasicType *sig_bt,
 399                             const VMRegPair *regs,
 400                             Label& skip_fixup) {
 401   // Before we get into the guts of the C2I adapter, see if we should be here
 402   // at all.  We've come from compiled code and are attempting to jump to the
 403   // interpreter, which means the caller made a static call to get here
 404   // (vcalls always get a compiled target if there is one).  Check for a
 405   // compiled target.  If there is one, we need to patch the caller's call.
 406   patch_callers_callsite(masm);
 407 
 408   __ bind(skip_fixup);
 409 
 410   int words_pushed = 0;
 411 
 412   // Since all args are passed on the stack, total_args_passed *
 413   // Interpreter::stackElementSize is the space we need.
 414 
 415   int extraspace = total_args_passed * Interpreter::stackElementSize;

 416 
 417   __ mov(r19_sender_sp, sp);


 418 
 419   // stack is aligned, keep it that way
 420   extraspace = align_up(extraspace, 2*wordSize);

 421 
 422   if (extraspace)
 423     __ sub(sp, sp, extraspace);
 424 
 425   // Now write the args into the outgoing interpreter space
 426   for (int i = 0; i < total_args_passed; i++) {
 427     if (sig_bt[i] == T_VOID) {
 428       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 429       continue;
 430     }
 431 
 432     // offset to start parameters
 433     int st_off   = (total_args_passed - i - 1) * Interpreter::stackElementSize;
 434     int next_off = st_off - Interpreter::stackElementSize;
 435 
 436     // Say 4 args:
 437     // i   st_off
 438     // 0   32 T_LONG
 439     // 1   24 T_VOID
 440     // 2   16 T_OBJECT
 441     // 3    8 T_BOOL
 442     // -    0 return address
 443     //
 444     // However, to make things extra confusing: because we can fit a Java long/double in
 445     // a single slot on a 64-bit VM, and it would be silly to break them up, the interpreter
 446     // leaves one slot empty and only stores to a single slot. In this case the
 447     // slot that is occupied is the T_VOID slot. See, I said it was confusing.
 448 
 449     VMReg r_1 = regs[i].first();
 450     VMReg r_2 = regs[i].second();
 451     if (!r_1->is_valid()) {
 452       assert(!r_2->is_valid(), "");
 453       continue;




 454     }
 455     if (r_1->is_stack()) {
 456       // memory to memory use rscratch1
 457       int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
 458                     + extraspace
 459                     + words_pushed * wordSize);
 460       if (!r_2->is_valid()) {
 461         // sign extend??
 462         __ ldrw(rscratch1, Address(sp, ld_off));
 463         __ str(rscratch1, Address(sp, st_off));
 464 
 465       } else {
 466 
 467         __ ldr(rscratch1, Address(sp, ld_off));
 468 
 469         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 470         // T_DOUBLE and T_LONG use two slots in the interpreter
 471         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 472           // ld_off == LSW, ld_off+wordSize == MSW
 473           // st_off == MSW, next_off == LSW
 474           __ str(rscratch1, Address(sp, next_off));
 475 #ifdef ASSERT
 476           // Overwrite the unused slot with known junk
 477           __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaaaull);
 478           __ str(rscratch1, Address(sp, st_off));
 479 #endif /* ASSERT */
 480         } else {
 481           __ str(rscratch1, Address(sp, st_off));
 482         }
 483       }
 484     } else if (r_1->is_Register()) {
 485       Register r = r_1->as_Register();
 486       if (!r_2->is_valid()) {
 487         // must be only an int (or less) so move only 32 bits to slot
 488         // why not sign extend??
 489         __ str(r, Address(sp, st_off));
 490       } else {
 491         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 492         // T_DOUBLE and T_LONG use two slots in the interpreter
 493         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 494           // jlong/double in gpr
 495 #ifdef ASSERT
 496           // Overwrite the unused slot with known junk
 497           __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaabull);
 498           __ str(rscratch1, Address(sp, st_off));
 499 #endif /* ASSERT */
 500           __ str(r, Address(sp, next_off));
 501         } else {
 502           __ str(r, Address(sp, st_off));
 503         }
 504       }
 505     } else {
 506       assert(r_1->is_FloatRegister(), "");
 507       if (!r_2->is_valid()) {
 508         // only a float, use just part of the slot
 509         __ strs(r_1->as_FloatRegister(), Address(sp, st_off));
 510       } else {
 511 #ifdef ASSERT
 512         // Overwrite the unused slot with known junk
 513         __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaacull);
 514         __ str(rscratch1, Address(sp, st_off));
 515 #endif /* ASSERT */
 516         __ strd(r_1->as_FloatRegister(), Address(sp, next_off));
 517       }
 518     }
 519   }
 520 
 521   __ mov(esp, sp); // Interp expects args on caller's expression stack
 522 
 523   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
 524   __ br(rscratch1);
 525 }
 526 

 527 
 528 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
 529                                     int total_args_passed,
 530                                     int comp_args_on_stack,
 531                                     const BasicType *sig_bt,
 532                                     const VMRegPair *regs) {
 533 
 534   // Note: r19_sender_sp contains the senderSP on entry. We must
 535   // preserve it since we may do a i2c -> c2i transition if we lose a
 536   // race where compiled code goes non-entrant while we get args
 537   // ready.
 538 
 539   // Adapters are frameless.
 540 
 541   // An i2c adapter is frameless because the *caller* frame, which is
 542   // interpreted, routinely repairs its own esp (from
 543   // interpreter_frame_last_sp), even if a callee has modified the
 544   // stack pointer.  It also recalculates and aligns sp.
 545 
 546   // A c2i adapter is frameless because the *callee* frame, which is
 547   // interpreted, routinely repairs its caller's sp (from sender_sp,
 548   // which is set up via the senderSP register).
 549 
 550   // In other words, if *either* the caller or callee is interpreted, we can
 551   // get the stack pointer repaired after a call.
 552 

 575       range_check(masm, rax, r11,
 576                   StubRoutines::initial_stubs_code()->code_begin(),
 577                   StubRoutines::initial_stubs_code()->code_end(),
 578                   L_ok);
 579     }
 580     if (StubRoutines::final_stubs_code() != nullptr) {
 581       range_check(masm, rax, r11,
 582                   StubRoutines::final_stubs_code()->code_begin(),
 583                   StubRoutines::final_stubs_code()->code_end(),
 584                   L_ok);
 585     }
 586     const char* msg = "i2c adapter must return to an interpreter frame";
 587     __ block_comment(msg);
 588     __ stop(msg);
 589     __ bind(L_ok);
 590     __ block_comment("} verify_i2ce ");
 591 #endif
 592   }
 593 
 594   // Cut-out for having no stack args.
 595   int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
 596   if (comp_args_on_stack) {
 597     __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
 598     __ andr(sp, rscratch1, -16);

 599   }
 600 
 601   // Will jump to the compiled code just as if compiled code was doing it.
 602   // Pre-load the register-jump target early, to schedule it better.
 603   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));
 604 
 605 #if INCLUDE_JVMCI
 606   if (EnableJVMCI) {
 607     // check if this call should be routed towards a specific entry point
 608     __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 609     Label no_alternative_target;
 610     __ cbz(rscratch2, no_alternative_target);
 611     __ mov(rscratch1, rscratch2);
 612     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 613     __ bind(no_alternative_target);
 614   }
 615 #endif // INCLUDE_JVMCI
 616 


 617   // Now generate the shuffle code.
 618   for (int i = 0; i < total_args_passed; i++) {
 619     if (sig_bt[i] == T_VOID) {
 620       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");

 621       continue;
 622     }
 623 
 624     // Pick up 0, 1 or 2 words from SP+offset.

 625 
 626     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
 627             "scrambled load targets?");
 628     // Load in argument order going down.
 629     int ld_off = (total_args_passed - i - 1)*Interpreter::stackElementSize;
 630     // Point to interpreter value (vs. tag)
 631     int next_off = ld_off - Interpreter::stackElementSize;
 632     //
 633     //
 634     //
 635     VMReg r_1 = regs[i].first();
 636     VMReg r_2 = regs[i].second();
 637     if (!r_1->is_valid()) {
 638       assert(!r_2->is_valid(), "");
 639       continue;
 640     }
 641     if (r_1->is_stack()) {
 642       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 643       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
 644       if (!r_2->is_valid()) {
 645         // sign extend???
 646         __ ldrsw(rscratch2, Address(esp, ld_off));
 647         __ str(rscratch2, Address(sp, st_off));
 648       } else {
 649         //
 650         // We are using two optoregs. This can be either T_OBJECT,
 651         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
 652         // two slots but only uses one for the T_LONG or T_DOUBLE case,
 653         // so we must adjust where to pick up the data to match the
 654         // interpreter.
 655         //
 656         // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
 657         // are accessed at negative offsets, so the LSW is at the LOW address
 658 
 659         // ld_off is MSW so get LSW
 660         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 661                            next_off : ld_off;
 662         __ ldr(rscratch2, Address(esp, offset));
 663         // st_off is LSW (i.e. reg.first())
 664         __ str(rscratch2, Address(sp, st_off));
 665       }
 666     } else if (r_1->is_Register()) {  // Register argument
 667       Register r = r_1->as_Register();
 668       if (r_2->is_valid()) {
 669         //
 670         // We are using two VMRegs. This can be either T_OBJECT,
 671         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
 672         // two slots but only uses one for the T_LONG or T_DOUBLE case,
 673         // so we must adjust where to pick up the data to match the
 674         // interpreter.
 675 
 676         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 677                            next_off : ld_off;
 678 
 679         // this can be a misaligned move
 680         __ ldr(r, Address(esp, offset));
 681       } else {
 682         // sign extend and use a full word?
 683         __ ldrw(r, Address(esp, ld_off));
 684       }
 685     } else {
 686       if (!r_2->is_valid()) {
 687         __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
 688       } else {
 689         __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
 690       }
 691     }
 692   }
 693 
 694   __ mov(rscratch2, rscratch1);
 695   __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
 696   __ mov(rscratch1, rscratch2);
 697 
 698   // 6243940 We might end up in handle_wrong_method if
 699   // the callee is deoptimized as we race through here. If that
 700   // happens we don't want to take a safepoint because the
 701   // caller frame will look interpreted and arguments are now
 702   // "compiled" so it is much better to make this transition
 703   // invisible to the stack walking code. Unfortunately if
 704   // we try to find the callee by normal means a safepoint
 705   // is possible. So we stash the desired callee in the thread
 706   // and the VM will find it there should this case occur.
 707 
 708   __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
 709 
 710   __ br(rscratch1);
 711 }
 712 
 713 // ---------------------------------------------------------------
 714 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 715                                                             int total_args_passed,
 716                                                             int comp_args_on_stack,
 717                                                             const BasicType *sig_bt,
 718                                                             const VMRegPair *regs,
 719                                                             AdapterFingerPrint* fingerprint) {
 720   address i2c_entry = __ pc();
 721 
 722   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
 723 
 724   address c2i_unverified_entry = __ pc();
 725   Label skip_fixup;
 726 
 727   Register data = rscratch2;
 728   Register receiver = j_rarg0;
 729   Register tmp = r10;  // A call-clobbered register not used for arg passing
 730 
 731   // -------------------------------------------------------------------------
 732   // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
 733   // to the interpreter.  The args start out packed in the compiled layout.  They
 734   // need to be unpacked into the interpreter layout.  This will almost always
 735   // require some stack space.  We grow the current (compiled) stack, then repack
 736   // the args.  We  finally end in a jump to the generic interpreter entry point.
 737   // On exit from the interpreter, the interpreter will restore our SP (lest the
 738   // compiled code, which relies solely on SP and not FP, get sick).
 739 
 740   {
 741     __ block_comment("c2i_unverified_entry {");
 742     // Method might have been compiled since the call site was patched to
 743     // interpreted; if that is the case treat it as a miss so we can get
 744     // the call site corrected.
 745     __ ic_check(1 /* end_alignment */);
 746     __ ldr(rmethod, Address(data, CompiledICData::speculated_method_offset()));
 747 
 748     __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 749     __ cbz(rscratch1, skip_fixup);
 750     __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 751     __ block_comment("} c2i_unverified_entry");
 752   }
 753 
 754   address c2i_entry = __ pc();


 755 
 756   // Class initialization barrier for static methods
 757   address c2i_no_clinit_check_entry = nullptr;
 758   if (VM_Version::supports_fast_class_init_checks()) {
 759     Label L_skip_barrier;
 760 
 761     { // Bypass the barrier for non-static methods
 762       __ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
 763       __ andsw(zr, rscratch1, JVM_ACC_STATIC);
 764       __ br(Assembler::EQ, L_skip_barrier); // non-static
 765     }
 766 
 767     __ load_method_holder(rscratch2, rmethod);
 768     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
 769     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));


 770 
 771     __ bind(L_skip_barrier);
 772     c2i_no_clinit_check_entry = __ pc();

 773   }
 774 
 775   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 776   bs->c2i_entry_barrier(masm);
 777 
 778   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 779 
 780   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
 781 }
 782 
 783 static int c_calling_convention_priv(const BasicType *sig_bt,
 784                                          VMRegPair *regs,
 785                                          int total_args_passed) {
 786 
 787 // We return the amount of VMRegImpl stack slots we need to reserve for all
 788 // the arguments NOT counting out_preserve_stack_slots.
 789 
 790     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
 791       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5,  c_rarg6,  c_rarg7
 792     };
 793     static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
 794       c_farg0, c_farg1, c_farg2, c_farg3,
 795       c_farg4, c_farg5, c_farg6, c_farg7
 796     };
 797 
 798     uint int_args = 0;
 799     uint fp_args = 0;
 800     uint stk_args = 0; // inc by 2 each time

1797   if (method->is_synchronized()) {
1798     Label count;
1799     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1800 
1801     // Get the handle (the 2nd argument)
1802     __ mov(oop_handle_reg, c_rarg1);
1803 
1804     // Get address of the box
1805 
1806     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1807 
1808     // Load the oop from the handle
1809     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1810 
1811     if (LockingMode == LM_MONITOR) {
1812       __ b(slow_path_lock);
1813     } else if (LockingMode == LM_LEGACY) {
1814       // Load (object->mark() | 1) into swap_reg %r0
1815       __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1816       __ orr(swap_reg, rscratch1, 1);




1817 
1818       // Save (object->mark() | 1) into BasicLock's displaced header
1819       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1820 
1821       // src -> dest iff dest == r0 else r0 <- dest
1822       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
1823 
1824       // Hmm should this move to the slow path code area???
1825 
1826       // Test if the oopMark is an obvious stack pointer, i.e.,
1827       //  1) (mark & 3) == 0, and
1828       //  2) sp <= mark < sp + os::pagesize()
1829       // These 3 tests can be done by evaluating the following
1830       // expression: ((mark - sp) & (3 - os::vm_page_size())),
1831       // assuming both stack pointer and pagesize have their
1832       // least significant 2 bits clear.
1833       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
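      // Worked example of the combined test (illustrative only; assumes a
      // 4 KiB page, i.e. os::vm_page_size() == 0x1000, which need not hold):
      // 3 - 0x1000 == 0x...fffff003, so the AND keeps bits [1:0] and [63:12].
      // The result is zero exactly when (mark & 3) == 0 (sp's low two bits
      // are clear) and 0 <= mark - sp < 0x1000, i.e. sp <= mark < sp + page size.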
1834 
1835       __ sub(swap_reg, sp, swap_reg);
1836       __ neg(swap_reg, swap_reg);

2799   __ bind(pending);
2800 
2801   reg_save.restore_live_registers(masm);
2802 
2803   // exception pending => remove activation and forward to exception handler
2804 
2805   __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
2806 
2807   __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
2808   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2809 
2810   // -------------
2811   // make sure all code is generated
2812   masm->flush();
2813 
2814   // return the  blob
2815   // frame_size_words or bytes??
2816   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
2817 }
2818 
2819 // Continuation point for throwing of implicit exceptions that are
2820 // not handled in the current activation. Fabricates an exception
2821 // oop and initiates normal exception dispatching in this
2822 // frame. Since we need to preserve callee-saved values (currently
2823 // only for C2, but done for C1 as well) we need a callee-saved oop
2824 // map and therefore have to make these stubs into RuntimeStubs
2825 // rather than BufferBlobs.  If the compiler needs all registers to
2826 // be preserved between the fault point and the exception handler
2827 // then it must assume responsibility for that in
2828 // AbstractCompiler::continuation_for_implicit_null_exception or
2829 // continuation_for_implicit_division_by_zero_exception. All other
2830 // implicit exceptions (e.g., NullPointerException or
2831 // AbstractMethodError on entry) are either at call sites or
2832 // otherwise assume that stack unwinding will be initiated, so
2833 // caller saved registers were assumed volatile in the compiler.
2834 
2835 RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) {
2836   assert(is_throw_id(id), "expected a throw stub id");
2837 
2838   const char* name = SharedRuntime::stub_name(id);

   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "asm/macroAssembler.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "classfile/symbolTable.hpp"
  30 #include "code/codeCache.hpp"
  31 #include "code/compiledIC.hpp"
  32 #include "code/debugInfoRec.hpp"
  33 #include "code/vtableStubs.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/barrierSetAssembler.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "interpreter/interp_masm.hpp"
  38 #include "logging/log.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "nativeInst_aarch64.hpp"
  41 #include "oops/klass.inline.hpp"
  42 #include "oops/method.inline.hpp"
  43 #include "prims/methodHandles.hpp"
  44 #include "runtime/continuation.hpp"
  45 #include "runtime/continuationEntry.inline.hpp"
  46 #include "runtime/globals.hpp"
  47 #include "runtime/jniHandles.hpp"
  48 #include "runtime/safepointMechanism.hpp"
  49 #include "runtime/sharedRuntime.hpp"

 342       break;
 343     case T_DOUBLE:
 344       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 345       if (fp_args < Argument::n_float_register_parameters_j) {
 346         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 347       } else {
 348         stk_args = align_up(stk_args, 2);
 349         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 350         stk_args += 2;
 351       }
 352       break;
 353     default:
 354       ShouldNotReachHere();
 355       break;
 356     }
 357   }
 358 
 359   return stk_args;
 360 }
 361 
 362 
 363 const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j;
 364 const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
 365 
 366 int SharedRuntime::java_return_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) {
 367 
 368   // Create the mapping between argument positions and registers.
 369 
 370   static const Register INT_ArgReg[java_return_convention_max_int] = {
 371     r0 /* j_rarg7 */, j_rarg6, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0
 372   };
 373 
 374   static const FloatRegister FP_ArgReg[java_return_convention_max_float] = {
 375     j_farg0, j_farg1, j_farg2, j_farg3, j_farg4, j_farg5, j_farg6, j_farg7
 376   };
 377 
 378   uint int_args = 0;
 379   uint fp_args = 0;
 380 
 381   for (int i = 0; i < total_args_passed; i++) {
 382     switch (sig_bt[i]) {
 383     case T_BOOLEAN:
 384     case T_CHAR:
 385     case T_BYTE:
 386     case T_SHORT:
 387     case T_INT:
 388       if (int_args < SharedRuntime::java_return_convention_max_int) {
 389         regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
 390         int_args ++;
 391       } else {
 392         return -1;
 393       }
 394       break;
 395     case T_VOID:
 396       // halves of T_LONG or T_DOUBLE
 397       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 398       regs[i].set_bad();
 399       break;
 400     case T_LONG:
 401       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 402       // fall through
 403     case T_OBJECT:
 404     case T_ARRAY:
 405     case T_ADDRESS:
 406       // Should T_METADATA be added to java_calling_convention as well?
 407     case T_METADATA:
 408       if (int_args < SharedRuntime::java_return_convention_max_int) {
 409         regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
 410         int_args ++;
 411       } else {
 412         return -1;
 413       }
 414       break;
 415     case T_FLOAT:
 416       if (fp_args < SharedRuntime::java_return_convention_max_float) {
 417         regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
 418         fp_args ++;
 419       } else {
 420         return -1;
 421       }
 422       break;
 423     case T_DOUBLE:
 424       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 425       if (fp_args < SharedRuntime::java_return_convention_max_float) {
 426         regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
 427         fp_args ++;
 428       } else {
 429         return -1;
 430       }
 431       break;
 432     default:
 433       ShouldNotReachHere();
 434       break;
 435     }
 436   }
 437 
 438   return int_args + fp_args;
 439 }
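// Example (illustrative) for java_return_convention above: for the return
// signature T_INT, T_DOUBLE, T_VOID the int maps to r0 and the double to
// j_farg0, and the function returns 2; a return of -1 signals that not all
// values fit in the available return registers.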
 440 
 441 // Patch the caller's callsite with entry to compiled code if it exists.
 442 static void patch_callers_callsite(MacroAssembler *masm) {
 443   Label L;
 444   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 445   __ cbz(rscratch1, L);
 446 
 447   __ enter();
 448   __ push_CPU_state();
 449 
 450   // VM needs caller's callsite
 451   // VM needs target method
 452   // This needs to be a long call since we will relocate this adapter to
 453   // the codeBuffer and it may not reach
 454 
 455 #ifndef PRODUCT
 456   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
 457 #endif
 458 
 459   __ mov(c_rarg0, rmethod);
 460   __ mov(c_rarg1, lr);
 461   __ authenticate_return_address(c_rarg1);
 462   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 463   __ blr(rscratch1);
 464 
 465   // Explicit isb required because fixup_callers_callsite may change the code
 466   // stream.
 467   __ safepoint_isb();
 468 
 469   __ pop_CPU_state();
 470   // restore sp
 471   __ leave();
 472   __ bind(L);
 473 }
 474 
 475 // For each inline type argument, sig includes the list of fields of
 476 // the inline type. This utility function computes the number of
 477 // arguments for the call if inline types are passed by reference (the
 478 // calling convention the interpreter expects).
 479 static int compute_total_args_passed_int(const GrowableArray<SigEntry>* sig_extended) {
 480   int total_args_passed = 0;
 481   if (InlineTypePassFieldsAsArgs) {
 482      for (int i = 0; i < sig_extended->length(); i++) {
 483        BasicType bt = sig_extended->at(i)._bt;
 484        if (bt == T_METADATA) {
 485          // In sig_extended, an inline type argument starts with:
 486          // T_METADATA, followed by the types of the fields of the
 487          // inline type and T_VOID to mark the end of the value
 488          // type. Inline types are flattened so, for instance, in the
 489          // case of an inline type with an int field and an inline type
 490          // field that itself has 2 fields, an int and a long:
 491          // T_METADATA T_INT T_METADATA T_INT T_LONG T_VOID (second
 492          // slot for the T_LONG) T_VOID (inner inline type) T_VOID
 493          // (outer inline type)
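         // Tracing that example through the loop below (illustrative): vt
         // starts at 1 for the outer T_METADATA, stays 1 over the first
         // T_INT, becomes 2 at the inner T_METADATA, stays 2 over its T_INT,
         // T_LONG and the T_VOID long-half, then drops to 1 and to 0 at the
         // two delimiter T_VOIDs, so the whole flattened inline type counts
         // as a single interpreter argument.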
 494          total_args_passed++;
 495          int vt = 1;
 496          do {
 497            i++;
 498            BasicType bt = sig_extended->at(i)._bt;
 499            BasicType prev_bt = sig_extended->at(i-1)._bt;
 500            if (bt == T_METADATA) {
 501              vt++;
 502            } else if (bt == T_VOID &&
 503                       prev_bt != T_LONG &&
 504                       prev_bt != T_DOUBLE) {
 505              vt--;
 506            }
 507          } while (vt != 0);
 508        } else {
 509          total_args_passed++;
 510        }
 511      }
 512   } else {
 513     total_args_passed = sig_extended->length();
 514   }
 515 
 516   return total_args_passed;
 517 }
 518 
 519 
 520 static void gen_c2i_adapter_helper(MacroAssembler* masm,
 521                                    BasicType bt,
 522                                    BasicType prev_bt,
 523                                    size_t size_in_bytes,
 524                                    const VMRegPair& reg_pair,
 525                                    const Address& to,
 526                                    Register tmp1,
 527                                    Register tmp2,
 528                                    Register tmp3,
 529                                    int extraspace,
 530                                    bool is_oop) {
 531   if (bt == T_VOID) {
 532     assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
 533     return;
 534   }
 535 
 536   // Say 4 args:
 537   // i   st_off
 538   // 0   32 T_LONG
 539   // 1   24 T_VOID
 540   // 2   16 T_OBJECT
 541   // 3    8 T_BOOL
 542   // -    0 return address
 543   //
 544   // However, to make things extra confusing: because we can fit a Java long/double in
 545   // a single slot on a 64-bit VM, and it would be silly to break them up, the interpreter
 546   // leaves one slot empty and only stores to a single slot. In this case the
 547   // slot that is occupied is the T_VOID slot. See, I said it was confusing.
 548 
 549   bool wide = (size_in_bytes == wordSize);
 550   VMReg r_1 = reg_pair.first();
 551   VMReg r_2 = reg_pair.second();
 552   assert(r_2->is_valid() == wide, "invalid size");
 553   if (!r_1->is_valid()) {
 554     assert(!r_2->is_valid(), "");
 555     return;
 556   }
 557 
 558   if (!r_1->is_FloatRegister()) {
 559     Register val = r25;
 560     if (r_1->is_stack()) {
 561       // memory to memory, use r25 (the scratch registers are used by store_heap_oop)
 562       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 563       __ load_sized_value(val, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
 564     } else {
 565       val = r_1->as_Register();
 566     }
 567     assert_different_registers(to.base(), val, tmp1, tmp2, tmp3);
 568     if (is_oop) {
 569       __ store_heap_oop(to, val, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
 570     } else {
 571       __ store_sized_value(to, val, size_in_bytes);
 572     }
 573   } else {
 574     if (wide) {
 575       __ strd(r_1->as_FloatRegister(), to);
 576     } else {
 577       // only a float, use just part of the slot
 578       __ strs(r_1->as_FloatRegister(), to);
 579     }
 580   }
 581 }
 582 
 583 static void gen_c2i_adapter(MacroAssembler *masm,
 584                             const GrowableArray<SigEntry>* sig_extended,


 585                             const VMRegPair *regs,
 586                             bool requires_clinit_barrier,
 587                             address& c2i_no_clinit_check_entry,
 588                             Label& skip_fixup,
 589                             address start,
 590                             OopMapSet* oop_maps,
 591                             int& frame_complete,
 592                             int& frame_size_in_words,
 593                             bool alloc_inline_receiver) {
 594   if (requires_clinit_barrier && VM_Version::supports_fast_class_init_checks()) {
 595     Label L_skip_barrier;
 596 
 597     { // Bypass the barrier for non-static methods
 598       __ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
 599       __ andsw(zr, rscratch1, JVM_ACC_STATIC);
 600       __ br(Assembler::EQ, L_skip_barrier); // non-static
 601     }
 602 
 603     __ load_method_holder(rscratch2, rmethod);
 604     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
 605     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 606 
 607     __ bind(L_skip_barrier);
 608     c2i_no_clinit_check_entry = __ pc();
 609   }
 610 
 611   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 612   bs->c2i_entry_barrier(masm);
 613 
 614   // Before we get into the guts of the C2I adapter, see if we should be here
 615   // at all.  We've come from compiled code and are attempting to jump to the
 616   // interpreter, which means the caller made a static call to get here
 617   // (vcalls always get a compiled target if there is one).  Check for a
 618   // compiled target.  If there is one, we need to patch the caller's call.
 619   patch_callers_callsite(masm);
 620 
 621   __ bind(skip_fixup);
 622 
 623   // Name some registers to be used in the following code. We can use
 624   // anything except r0-r7 which are arguments in the Java calling
 625   // convention, rmethod (r12), and r13 which holds the outgoing sender
 626   // SP for the interpreter.
 627   Register buf_array = r10;   // Array of buffered inline types
 628   Register buf_oop = r11;     // Buffered inline type oop
 629   Register tmp1 = r15;
 630   Register tmp2 = r16;
 631   Register tmp3 = r17;
 632 
 633   if (InlineTypePassFieldsAsArgs) {
 634     // Is there an inline type argument?
 635     bool has_inline_argument = false;
 636     for (int i = 0; i < sig_extended->length() && !has_inline_argument; i++) {
 637       has_inline_argument = (sig_extended->at(i)._bt == T_METADATA);
 638     }
 639     if (has_inline_argument) {
 640       // There is at least an inline type argument: we're coming from
 641       // compiled code so we have no buffers to back the inline types.
 642       // Allocate the buffers here with a runtime call.
 643       RegisterSaver reg_save(false /* save_vectors */);
 644       OopMap* map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
 645 
 646       frame_complete = __ offset();
 647       address the_pc = __ pc();
 648 
 649       Label retaddr;
 650       __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
 651 
 652       __ mov(c_rarg0, rthread);
 653       __ mov(c_rarg1, rmethod);
 654       __ mov(c_rarg2, (int64_t)alloc_inline_receiver);
 655 
 656       __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_inline_types)));
 657       __ blr(rscratch1);
 658       __ bind(retaddr);
 659 
 660       oop_maps->add_gc_map(__ pc() - start, map);
 661       __ reset_last_Java_frame(false);
 662 
 663       reg_save.restore_live_registers(masm);
 664 
 665       Label no_exception;
 666       __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
 667       __ cbz(rscratch1, no_exception);
 668 
 669       __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
 670       __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
 671       __ b(RuntimeAddress(StubRoutines::forward_exception_entry()));
 672 
 673       __ bind(no_exception);
 674 
 675       // We get an array of objects from the runtime call
 676       __ get_vm_result(buf_array, rthread);
 677       __ get_vm_result_2(rmethod, rthread); // TODO: required to keep the callee Method live?
 678     }
 679   }
 680 
 681   // Since all args are passed on the stack, total_args_passed *
 682   // Interpreter::stackElementSize is the space we need.
 683 
 684   int total_args_passed = compute_total_args_passed_int(sig_extended);
 685   int extraspace = total_args_passed * Interpreter::stackElementSize;
 686 
 687   // stack is aligned, keep it that way
 688   extraspace = align_up(extraspace, StackAlignmentInBytes);
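  // Arithmetic sketch (illustrative): with 3 interpreter slots, extraspace is
  // 3 * 8 bytes = 24, rounded up to 32 by the 16-byte stack alignment.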
 689 
 690   // set senderSP value
 691   __ mov(r19_sender_sp, sp);
 692 
 693   __ sub(sp, sp, extraspace);
 694 
 695   // Now write the args into the outgoing interpreter space
 696 
 697   // next_arg_comp is the next argument from the compiler point of
 698   // view (inline type fields are passed in registers/on the stack). In
 699   // sig_extended, an inline type argument starts with: T_METADATA,
 700   // followed by the types of the fields of the inline type and T_VOID
 701   // to mark the end of the inline type. ignored counts the number of
 702   // T_METADATA/T_VOID. next_vt_arg is the next inline type argument:
 703   // used to get the buffer for that argument from the pool of buffers
 704   // we allocated above and want to pass to the
 705   // interpreter. next_arg_int is the next argument from the
 706   // interpreter point of view (inline types are passed by reference).
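  // For instance (illustrative), a single flattened inline-type argument
  // advances next_arg_int and next_vt_arg once, while next_arg_comp walks
  // every flattened sig_extended entry and ignored absorbs the T_METADATA
  // and delimiter T_VOID entries, which have no slot in the compiled
  // calling convention's regs array.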
 707   for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
 708        next_arg_comp < sig_extended->length(); next_arg_comp++) {
 709     assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
 710     assert(next_arg_int <= total_args_passed, "more arguments for the interpreter than expected?");
 711     BasicType bt = sig_extended->at(next_arg_comp)._bt;
 712     int st_off = (total_args_passed - next_arg_int - 1) * Interpreter::stackElementSize;
 713     if (!InlineTypePassFieldsAsArgs || bt != T_METADATA) {
 714       int next_off = st_off - Interpreter::stackElementSize;
 715       const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
 716       const VMRegPair reg_pair = regs[next_arg_comp-ignored];
 717       size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
 718       gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
 719                              size_in_bytes, reg_pair, Address(sp, offset), tmp1, tmp2, tmp3, extraspace, false);
 720       next_arg_int++;
 721 #ifdef ASSERT
 722       if (bt == T_LONG || bt == T_DOUBLE) {
 723         // Overwrite the unused slot with known junk
 724         __ mov(rscratch1, CONST64(0xdeadffffdeadaaaa));
 725         __ str(rscratch1, Address(sp, st_off));
 726       }
 727 #endif /* ASSERT */
 728     } else {
 729       ignored++;
 730       // get the buffer from the just allocated pool of buffers
 731       int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_OBJECT);
 732       __ load_heap_oop(buf_oop, Address(buf_array, index), tmp1, tmp2);
 733       next_vt_arg++; next_arg_int++;
 734       int vt = 1;
 735       // write fields we get from compiled code in registers/stack
 736       // slots to the buffer: we know we are done with that inline type
 737       // argument when we hit the T_VOID that acts as an end of inline
 738       // type delimiter for this inline type. Inline types are flattened
 739       // so we might encounter embedded inline types. Each entry in
 740       // sig_extended contains a field offset in the buffer.
 741       Label L_null;
 742       do {
 743         next_arg_comp++;
 744         BasicType bt = sig_extended->at(next_arg_comp)._bt;
 745         BasicType prev_bt = sig_extended->at(next_arg_comp - 1)._bt;
 746         if (bt == T_METADATA) {
 747           vt++;
 748           ignored++;
 749         } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
 750           vt--;
 751           ignored++;
 752         } else {
 753           int off = sig_extended->at(next_arg_comp)._offset;
 754           if (off == -1) {
 755             // Nullable inline type argument, emit null check
 756             VMReg reg = regs[next_arg_comp-ignored].first();
 757             Label L_notNull;
 758             if (reg->is_stack()) {
 759               int ld_off = reg->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 760               __ ldrb(tmp1, Address(sp, ld_off));
 761               __ cbnz(tmp1, L_notNull);
 762             } else {
 763               __ cbnz(reg->as_Register(), L_notNull);
 764             }
 765             __ str(zr, Address(sp, st_off));
 766             __ b(L_null);
 767             __ bind(L_notNull);
 768             continue;
 769           }
 770           assert(off > 0, "offset in object should be positive");
 771           size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 772           bool is_oop = is_reference_type(bt);
 773           gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
 774                                  size_in_bytes, regs[next_arg_comp-ignored], Address(buf_oop, off), tmp1, tmp2, tmp3, extraspace, is_oop);
 775         }
 776       } while (vt != 0);
 777       // pass the buffer to the interpreter
 778       __ str(buf_oop, Address(sp, st_off));
 779       __ bind(L_null);
 780     }
 781   }
 782 
 783   __ mov(esp, sp); // Interp expects args on caller's expression stack
 784 
 785   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
 786   __ br(rscratch1);
 787 }
 788 
 789 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, int comp_args_on_stack, const GrowableArray<SigEntry>* sig, const VMRegPair *regs) {
 790 
 791 
 792   // Note: r19_sender_sp contains the senderSP on entry. We must
 793   // preserve it since we may do a i2c -> c2i transition if we lose a
 794   // race where compiled code goes non-entrant while we get args
 795   // ready.
 796 
 797   // Adapters are frameless.
 798 
 799   // An i2c adapter is frameless because the *caller* frame, which is
 800   // interpreted, routinely repairs its own esp (from
 801   // interpreter_frame_last_sp), even if a callee has modified the
 802   // stack pointer.  It also recalculates and aligns sp.
 803 
 804   // A c2i adapter is frameless because the *callee* frame, which is
 805   // interpreted, routinely repairs its caller's sp (from sender_sp,
 806   // which is set up via the senderSP register).
 807 
 808   // In other words, if *either* the caller or callee is interpreted, we can
 809   // get the stack pointer repaired after a call.
 810 

 833       range_check(masm, rax, r11,
 834                   StubRoutines::initial_stubs_code()->code_begin(),
 835                   StubRoutines::initial_stubs_code()->code_end(),
 836                   L_ok);
 837     }
 838     if (StubRoutines::final_stubs_code() != nullptr) {
 839       range_check(masm, rax, r11,
 840                   StubRoutines::final_stubs_code()->code_begin(),
 841                   StubRoutines::final_stubs_code()->code_end(),
 842                   L_ok);
 843     }
 844     const char* msg = "i2c adapter must return to an interpreter frame";
 845     __ block_comment(msg);
 846     __ stop(msg);
 847     __ bind(L_ok);
 848     __ block_comment("} verify_i2ce ");
 849 #endif
 850   }
 851 
 852   // Cut-out for having no stack args.
 853   int comp_words_on_stack = 0;
 854   if (comp_args_on_stack) {
 855      comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord;
 856      __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
 857      __ andr(sp, rscratch1, -16);
 858   }
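  // Arithmetic sketch (illustrative): with 3 compiled out-args on the stack,
  // 3 * 4-byte VMReg slots = 12 bytes, rounded up to 2 words (16 bytes); sp
  // is lowered by that amount and then re-aligned to the 16-byte boundary
  // AArch64 requires.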
 859 
 860   // Will jump to the compiled code just as if compiled code was doing it.
 861   // Pre-load the register-jump target early, to schedule it better.
 862   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_inline_offset())));
 863 
 864 #if INCLUDE_JVMCI
 865   if (EnableJVMCI) {
 866     // check if this call should be routed towards a specific entry point
 867     __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 868     Label no_alternative_target;
 869     __ cbz(rscratch2, no_alternative_target);
 870     __ mov(rscratch1, rscratch2);
 871     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 872     __ bind(no_alternative_target);
 873   }
 874 #endif // INCLUDE_JVMCI
 875 
 876   int total_args_passed = sig->length();
 877 
 878   // Now generate the shuffle code.
 879   for (int i = 0; i < total_args_passed; i++) {
 880     BasicType bt = sig->at(i)._bt;
 881     if (bt == T_VOID) {
 882       assert(i > 0 && (sig->at(i - 1)._bt == T_LONG || sig->at(i - 1)._bt == T_DOUBLE), "missing half");
 883       continue;
 884     }
 885 
 886     // Pick up 0, 1 or 2 words from SP+offset.
 887     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?");
 888 


 889     // Load in argument order going down.
 890     int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
 891     // Point to interpreter value (vs. tag)
 892     int next_off = ld_off - Interpreter::stackElementSize;
 893     //
 894     //
 895     //
 896     VMReg r_1 = regs[i].first();
 897     VMReg r_2 = regs[i].second();
 898     if (!r_1->is_valid()) {
 899       assert(!r_2->is_valid(), "");
 900       continue;
 901     }
 902     if (r_1->is_stack()) {
 903       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 904       int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size;
 905       if (!r_2->is_valid()) {
 906         // sign extend???
 907         __ ldrsw(rscratch2, Address(esp, ld_off));
 908         __ str(rscratch2, Address(sp, st_off));
 909       } else {
 910         //
 911         // We are using two optoregs. This can be either T_OBJECT,
 912         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
 913         // two slots but only uses one for the T_LONG or T_DOUBLE case,
 914         // so we must adjust where to pick up the data to match the
 915         // interpreter.
 916         //
 917         // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
 918         // are accessed at negative offsets, so the LSW is at the LOW address
 919 
 920         // ld_off is MSW so get LSW
 921         const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;

 922         __ ldr(rscratch2, Address(esp, offset));
 923         // st_off is LSW (i.e. reg.first())
 924          __ str(rscratch2, Address(sp, st_off));
 925        }
 926      } else if (r_1->is_Register()) {  // Register argument
 927        Register r = r_1->as_Register();
 928        if (r_2->is_valid()) {
 929          //
 930          // We are using two VMRegs. This can be either T_OBJECT,
 931          // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
 932          // two slots but only uses one for the T_LONG or T_DOUBLE case,
 933          // so we must adjust where to pick up the data to match the
 934          // interpreter.
 935 
 936         const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
 937 
 938          // this can be a misaligned move
 939          __ ldr(r, Address(esp, offset));
 940        } else {
 941          // sign extend and use a full word?
 942          __ ldrw(r, Address(esp, ld_off));
 943        }
 944      } else {
 945        if (!r_2->is_valid()) {
 946          __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
 947        } else {
 948          __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
 949        }
 950      }
 951    }
 952 
 953 
 954   __ mov(rscratch2, rscratch1);
 955   __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
 956   __ mov(rscratch1, rscratch2);
 957 
 958   // 6243940 We might end up in handle_wrong_method if
 959   // the callee is deoptimized as we race through here. If that
 960   // happens we don't want to take a safepoint because the
 961   // caller frame will look interpreted and arguments are now
 962   // "compiled" so it is much better to make this transition
 963   // invisible to the stack walking code. Unfortunately if
 964   // we try to find the callee by normal means a safepoint
 965   // is possible. So we stash the desired callee in the thread
 966   // and the VM will find it there should this case occur.
 967 
 968   __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));

 969   __ br(rscratch1);
 970 }
 971 
 972 static void gen_inline_cache_check(MacroAssembler *masm, Label& skip_fixup) {
 973   Register data = rscratch2;
 974   __ ic_check(1 /* end_alignment */);
 975   __ ldr(rmethod, Address(data, CompiledICData::speculated_method_offset()));




 976 
 977   // Method might have been compiled since the call site was patched to
 978   // interpreted; if that is the case treat it as a miss so we can get
 979   // the call site corrected.
 980   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 981   __ cbz(rscratch1, skip_fixup);
 982   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 983 }
 984 
 985 // ---------------------------------------------------------------
 986 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,
 987                                                             int comp_args_on_stack,
 988                                                             const GrowableArray<SigEntry>* sig,
 989                                                             const VMRegPair* regs,
 990                                                             const GrowableArray<SigEntry>* sig_cc,
 991                                                             const VMRegPair* regs_cc,
 992                                                             const GrowableArray<SigEntry>* sig_cc_ro,
 993                                                             const VMRegPair* regs_cc_ro,
 994                                                             AdapterFingerPrint* fingerprint,
 995                                                             AdapterBlob*& new_adapter,
 996                                                             bool allocate_code_blob) {
 997 
 998   address i2c_entry = __ pc();
 999   gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);

1000 
1001   // -------------------------------------------------------------------------
1002   // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
1003   // to the interpreter.  The args start out packed in the compiled layout.  They
1004   // need to be unpacked into the interpreter layout.  This will almost always
1005   // require some stack space.  We grow the current (compiled) stack, then repack
1006   // the args.  We  finally end in a jump to the generic interpreter entry point.
1007   // On exit from the interpreter, the interpreter will restore our SP (lest the
1008   // compiled code, which relies solely on SP and not FP, get sick).
1009 
1010   address c2i_unverified_entry        = __ pc();
1011   address c2i_unverified_inline_entry = __ pc();
1012   Label skip_fixup;




1013 
1014   gen_inline_cache_check(masm, skip_fixup);




1015 
1016   OopMapSet* oop_maps = new OopMapSet();
1017   int frame_complete = CodeOffsets::frame_never_safe;
1018   int frame_size_in_words = 0;
1019 
1020   // Scalarized c2i adapter with non-scalarized receiver (i.e., don't pack receiver)
1021   address c2i_no_clinit_check_entry = nullptr;
1022   address c2i_inline_ro_entry = __ pc();
1023   if (regs_cc != regs_cc_ro) {
1024     // No class init barrier needed because method is guaranteed to be non-static
1025     gen_c2i_adapter(masm, sig_cc_ro, regs_cc_ro, /* requires_clinit_barrier = */ false, c2i_no_clinit_check_entry,
1026                     skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1027     skip_fixup.reset();
1028   }
1029 
1030   // Scalarized c2i adapter
1031   address c2i_entry        = __ pc();
1032   address c2i_inline_entry = __ pc();
1033   gen_c2i_adapter(masm, sig_cc, regs_cc, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
1034                   skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ true);
1035 
1036   // Non-scalarized c2i adapter
1037   if (regs != regs_cc) {
1038     c2i_unverified_inline_entry = __ pc();
1039     Label inline_entry_skip_fixup;
1040     gen_inline_cache_check(masm, inline_entry_skip_fixup);
1041 
1042     c2i_inline_entry = __ pc();
1043     gen_c2i_adapter(masm, sig, regs, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
1044                     inline_entry_skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1045   }
1046 


1047 
1048   // The c2i adapter might safepoint and trigger a GC. The caller must make sure that
1049   // the GC knows about the location of oop argument locations passed to the c2i adapter.
1050   if (allocate_code_blob) {
1051     bool caller_must_gc_arguments = (regs != regs_cc);
1052     new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps, caller_must_gc_arguments);
1053   }
1054 
1055   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry, c2i_unverified_inline_entry, c2i_no_clinit_check_entry);
1056 }
1057 
1058 static int c_calling_convention_priv(const BasicType *sig_bt,
1059                                          VMRegPair *regs,
1060                                          int total_args_passed) {
1061 
1062 // We return the number of VMRegImpl stack slots we need to reserve for all
1063 // the arguments NOT counting out_preserve_stack_slots.
1064 
1065     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1066       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5,  c_rarg6,  c_rarg7
1067     };
1068     static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1069       c_farg0, c_farg1, c_farg2, c_farg3,
1070       c_farg4, c_farg5, c_farg6, c_farg7
1071     };
1072 
1073     uint int_args = 0;
1074     uint fp_args = 0;
1075     uint stk_args = 0; // inc by 2 each time
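
         // A worked example (a sketch of the assignment this routine performs,
         // assuming the usual AAPCS64-style mapping): for a native signature of
         // (jint, jlong, jdouble, jfloat) the two integer arguments land in
         // c_rarg0 and c_rarg1, the two FP arguments in c_farg0 and c_farg1, so
         // stk_args stays 0. Only once the 8 integer or 8 FP argument registers
         // are exhausted does an argument spill to the stack, growing stk_args
         // by two VMRegImpl slots (one 8-byte word) per spilled argument.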

2072   if (method->is_synchronized()) {
2073     Label count;
2074     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2075 
2076     // Get the handle (the 2nd argument)
2077     __ mov(oop_handle_reg, c_rarg1);
2078 
2079     // Get address of the box
2080 
2081     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2082 
2083     // Load the oop from the handle
2084     __ ldr(obj_reg, Address(oop_handle_reg, 0));
2085 
2086     if (LockingMode == LM_MONITOR) {
2087       __ b(slow_path_lock);
2088     } else if (LockingMode == LM_LEGACY) {
2089       // Load (object->mark() | 1) into swap_reg %r0
2090       __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2091       __ orr(swap_reg, rscratch1, 1);
2092       if (EnableValhalla) {
2093         // Mask inline_type bit such that we go to the slow path if object is an inline type
2094         __ andr(swap_reg, swap_reg, ~((int) markWord::inline_type_bit_in_place));
2095       }
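           // (A note on the mechanism, as far as can be inferred here: with the
           // inline_type bit cleared, swap_reg can no longer match the mark word
           // of an inline-type object, so the CAS below fails for such objects
           // and they end up on slow_path_lock.)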
2096 
2097       // Save (object->mark() | 1) into BasicLock's displaced header
2098       __ str(swap_reg, Address(lock_reg, mark_word_offset));
2099 
2100       // src -> dest iff dest == r0 else r0 <- dest
2101       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
2102 
2103       // Hmm should this move to the slow path code area???
2104 
2105       // Test if the oopMark is an obvious stack pointer, i.e.,
2106       //  1) (mark & 3) == 0, and
2107       //  2) sp <= mark < sp + os::vm_page_size()
2108       // These 3 tests can be done by evaluating the following
2109       // expression: ((mark - sp) & (3 - os::vm_page_size())),
2110       // assuming both stack pointer and pagesize have their
2111       // least significant 2 bits clear.
2112       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
2113 
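           // Worked example (illustrative, with a 4K page): os::vm_page_size()
           // == 0x1000, so (3 - 0x1000) is the mask ~0xFFC, i.e. every bit set
           // except bits [11:2]. ((mark - sp) & (3 - 0x1000)) is therefore zero
           // exactly when 0 <= mark - sp < 0x1000 and the low two bits of
           // (mark - sp) are clear -- the "mark is a nearby stack address"
           // condition described above.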
2114       __ sub(swap_reg, sp, swap_reg);
2115       __ neg(swap_reg, swap_reg);

3078   __ bind(pending);
3079 
3080   reg_save.restore_live_registers(masm);
3081 
3082   // exception pending => remove activation and forward to exception handler
3083 
3084   __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
3085 
3086   __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
3087   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3088 
3089   // -------------
3090   // make sure all code is generated
3091   masm->flush();
3092 
3093   // return the blob
3094   // frame_size_words or bytes??
3095   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
3096 }
3097 
3098 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
3099   BufferBlob* buf = BufferBlob::create("inline types pack/unpack", 16 * K);
3100   CodeBuffer buffer(buf);
3101   short buffer_locs[20];
3102   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3103                                          sizeof(buffer_locs)/sizeof(relocInfo));
3104 
3105   MacroAssembler _masm(&buffer);
3106   MacroAssembler* masm = &_masm;
3107 
3108   const Array<SigEntry>* sig_vk = vk->extended_sig();
3109   const Array<VMRegPair>* regs = vk->return_regs();
3110 
3111   int pack_fields_jobject_off = __ offset();
3112   // Resolve pre-allocated buffer from JNI handle.
3113   // We cannot do this in generate_call_stub() because it requires GC code to be initialized.
3114   Register Rresult = r14;  // See StubGenerator::generate_call_stub().
3115   __ ldr(r0, Address(Rresult));
3116   __ resolve_jobject(r0 /* value */,
3117                      rthread /* thread */,
3118                      r12 /* tmp */);
3119   __ str(r0, Address(Rresult));
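       // From here on, r0 (and the word Rresult points to) holds the resolved
       // buffer oop rather than a JNI handle; the field stores below write
       // directly into that object.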
3120 
3121   int pack_fields_off = __ offset();
3122 
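       // Pack: the field values currently live in the return registers described
       // by vk->return_regs(); store each one into the buffer pointed to by r0
       // at its field offset. T_METADATA entries and the T_VOID halves of
       // longs/doubles carry no value; 'j' tracks the current VMRegPair as the
       // extended signature is walked.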
3123   int j = 1;
3124   for (int i = 0; i < sig_vk->length(); i++) {
3125     BasicType bt = sig_vk->at(i)._bt;
3126     if (bt == T_METADATA) {
3127       continue;
3128     }
3129     if (bt == T_VOID) {
3130       if (sig_vk->at(i-1)._bt == T_LONG ||
3131           sig_vk->at(i-1)._bt == T_DOUBLE) {
3132         j++;
3133       }
3134       continue;
3135     }
3136     int off = sig_vk->at(i)._offset;
3137     VMRegPair pair = regs->at(j);
3138     VMReg r_1 = pair.first();
3139     VMReg r_2 = pair.second();
3140     Address to(r0, off);
3141     if (bt == T_FLOAT) {
3142       __ strs(r_1->as_FloatRegister(), to);
3143     } else if (bt == T_DOUBLE) {
3144       __ strd(r_1->as_FloatRegister(), to);
3145     } else {
3146       Register val = r_1->as_Register();
3147       assert_different_registers(to.base(), val, r15, r16, r17);
3148       if (is_reference_type(bt)) {
3149         __ store_heap_oop(to, val, r15, r16, r17, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
3150       } else {
3151         __ store_sized_value(to, r_1->as_Register(), type2aelembytes(bt));
3152       }
3153     }
3154     j++;
3155   }
3156   assert(j == regs->length(), "missed a field?");
3157   if (vk->has_nullable_atomic_layout()) {
3158     // Zero the null marker (setting it to 1 would be better but would require an additional register)
3159     __ strb(zr, Address(r0, vk->null_marker_offset()));
3160   }
3161   __ ret(lr);
3162 
3163   int unpack_fields_off = __ offset();
3164 
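       // Unpack: r0 holds the buffered inline object (possibly null). A null
       // object skips the field loads entirely; otherwise each field is loaded
       // from its offset into the register the scalarized return convention
       // expects, mirroring the pack loop above.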
3165   Label skip;
3166   __ cbz(r0, skip);
3167 
3168   j = 1;
3169   for (int i = 0; i < sig_vk->length(); i++) {
3170     BasicType bt = sig_vk->at(i)._bt;
3171     if (bt == T_METADATA) {
3172       continue;
3173     }
3174     if (bt == T_VOID) {
3175       if (sig_vk->at(i-1)._bt == T_LONG ||
3176           sig_vk->at(i-1)._bt == T_DOUBLE) {
3177         j++;
3178       }
3179       continue;
3180     }
3181     int off = sig_vk->at(i)._offset;
3182     assert(off > 0, "offset in object should be positive");
3183     VMRegPair pair = regs->at(j);
3184     VMReg r_1 = pair.first();
3185     VMReg r_2 = pair.second();
3186     Address from(r0, off);
3187     if (bt == T_FLOAT) {
3188       __ ldrs(r_1->as_FloatRegister(), from);
3189     } else if (bt == T_DOUBLE) {
3190       __ ldrd(r_1->as_FloatRegister(), from);
3191     } else if (bt == T_OBJECT || bt == T_ARRAY) {
3192       assert_different_registers(r0, r_1->as_Register());
3193       __ load_heap_oop(r_1->as_Register(), from, rscratch1, rscratch2);
3194     } else {
3195       assert(is_java_primitive(bt), "unexpected basic type");
3196       assert_different_registers(r0, r_1->as_Register());
3197 
3198       size_t size_in_bytes = type2aelembytes(bt);
3199       __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
3200     }
3201     j++;
3202   }
3203   assert(j == regs->length(), "missed a field?");
3204 
3205   __ bind(skip);
3206 
3207   __ ret(lr);
3208 
3209   __ flush();
3210 
3211   return BufferedInlineTypeBlob::create(&buffer, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
3212 }
3213 
3214 // Continuation point for throwing of implicit exceptions that are
3215 // not handled in the current activation. Fabricates an exception
3216 // oop and initiates normal exception dispatching in this
3217 // frame. Since we need to preserve callee-saved values (currently
3218 // only for C2, but done for C1 as well) we need a callee-saved oop
3219 // map and therefore have to make these stubs into RuntimeStubs
3220 // rather than BufferBlobs.  If the compiler needs all registers to
3221 // be preserved between the fault point and the exception handler
3222 // then it must assume responsibility for that in
3223 // AbstractCompiler::continuation_for_implicit_null_exception or
3224 // continuation_for_implicit_division_by_zero_exception. All other
3225 // implicit exceptions (e.g., NullPointerException or
3226 // AbstractMethodError on entry) are either at call sites or
3227 // otherwise assume that stack unwinding will be initiated, so
3228 // caller saved registers were assumed volatile in the compiler.
3229 
3230 RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) {
3231   assert(is_throw_id(id), "expected a throw stub id");
3232 
3233   const char* name = SharedRuntime::stub_name(id);