src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "asm/macroAssembler.hpp"
  29 #include "asm/macroAssembler.inline.hpp"

  30 #include "code/codeCache.hpp"
  31 #include "code/compiledIC.hpp"
  32 #include "code/debugInfoRec.hpp"
  33 #include "code/vtableStubs.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/barrierSetAssembler.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "interpreter/interp_masm.hpp"
  38 #include "logging/log.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "nativeInst_aarch64.hpp"
  41 #include "oops/klass.inline.hpp"
  42 #include "oops/method.inline.hpp"
  43 #include "prims/methodHandles.hpp"
  44 #include "runtime/continuation.hpp"
  45 #include "runtime/continuationEntry.inline.hpp"
  46 #include "runtime/globals.hpp"
  47 #include "runtime/jniHandles.hpp"
  48 #include "runtime/safepointMechanism.hpp"
  49 #include "runtime/sharedRuntime.hpp"

 342       break;
 343     case T_DOUBLE:
 344       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 345       if (fp_args < Argument::n_float_register_parameters_j) {
 346         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 347       } else {
 348         stk_args = align_up(stk_args, 2);
 349         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 350         stk_args += 2;
 351       }
 352       break;
 353     default:
 354       ShouldNotReachHere();
 355       break;
 356     }
 357   }
 358 
 359   return stk_args;
 360 }
  361 
 362 // Patch the caller's callsite with the entry to compiled code if it exists.
 363 static void patch_callers_callsite(MacroAssembler *masm) {
 364   Label L;
 365   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 366   __ cbz(rscratch1, L);
 367 
 368   __ enter();
 369   __ push_CPU_state();
 370 
 371   // VM needs caller's callsite
 372   // VM needs target method
 373   // This needs to be a long call since we will relocate this adapter to
 374   // the codeBuffer and it may not reach
 375 
 376 #ifndef PRODUCT
 377   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
 378 #endif
 379 
 380   __ mov(c_rarg0, rmethod);
 381   __ mov(c_rarg1, lr);
 382   __ authenticate_return_address(c_rarg1);
 383   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 384   __ blr(rscratch1);
 385 
 386   // Explicit isb required because fixup_callers_callsite may change the code
 387   // stream.
 388   __ safepoint_isb();
 389 
 390   __ pop_CPU_state();
 391   // restore sp
 392   __ leave();
 393   __ bind(L);
 394 }
  395 
 396 static void gen_c2i_adapter(MacroAssembler *masm,
 397                             int total_args_passed,
 398                             int comp_args_on_stack,
 399                             const BasicType *sig_bt,
 400                             const VMRegPair *regs,
  401                             Label& skip_fixup) {
 402   // Before we get into the guts of the C2I adapter, see if we should be here
 403   // at all.  We've come from compiled code and are attempting to jump to the
 404   // interpreter, which means the caller made a static call to get here
 405   // (vcalls always get a compiled target if there is one).  Check for a
 406   // compiled target.  If there is one, we need to patch the caller's call.
 407   patch_callers_callsite(masm);
 408 
 409   __ bind(skip_fixup);
 410 
  411   int words_pushed = 0;
 412 
 413   // Since all args are passed on the stack, total_args_passed *
 414   // Interpreter::stackElementSize is the space we need.
 415 
 416   int extraspace = total_args_passed * Interpreter::stackElementSize;

 417 
 418   __ mov(r19_sender_sp, sp);


 419 
 420   // stack is aligned, keep it that way
 421   extraspace = align_up(extraspace, 2*wordSize);

 422 
 423   if (extraspace)
 424     __ sub(sp, sp, extraspace);
 425 
 426   // Now write the args into the outgoing interpreter space
 427   for (int i = 0; i < total_args_passed; i++) {
 428     if (sig_bt[i] == T_VOID) {
 429       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 430       continue;
 431     }
 432 
 433     // offset to start parameters
 434     int st_off   = (total_args_passed - i - 1) * Interpreter::stackElementSize;
 435     int next_off = st_off - Interpreter::stackElementSize;
 436 
 437     // Say 4 args:
 438     // i   st_off
 439     // 0   32 T_LONG
 440     // 1   24 T_VOID
 441     // 2   16 T_OBJECT
 442     // 3    8 T_BOOL
 443     // -    0 return address
 444     //
  445     // However, to make things extra confusing: because we can fit a Java long/double in
  446     // a single slot on a 64-bit VM, and it would be silly to break them up, the interpreter
  447     // leaves one slot empty and only stores to a single slot. In this case the
  448     // slot that is occupied is the T_VOID slot. See, I said it was confusing.
 449 
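    // Worked example (a sketch, assuming Interpreter::stackElementSize == 8 on
    // this 64-bit port): with the four arguments in the table above, extraspace
    // is 4 * 8 = 32 bytes, already a multiple of 2*wordSize, so align_up() leaves
    // it unchanged. For the T_LONG at i == 0, the 64-bit value is written to
    // next_off (the T_VOID slot one element below st_off), and in ASSERT builds
    // the unused st_off slot is filled with the 0xdeadffffdeadaaaa pattern so a
    // stale read is easy to spot.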
 450     VMReg r_1 = regs[i].first();
 451     VMReg r_2 = regs[i].second();
 452     if (!r_1->is_valid()) {
 453       assert(!r_2->is_valid(), "");
 454       continue;




 455     }
 456     if (r_1->is_stack()) {
  457       // memory to memory, use rscratch1
 458       int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
 459                     + extraspace
 460                     + words_pushed * wordSize);
 461       if (!r_2->is_valid()) {
 462         // sign extend??
 463         __ ldrw(rscratch1, Address(sp, ld_off));
 464         __ str(rscratch1, Address(sp, st_off));
 465 
 466       } else {




 467 
 468         __ ldr(rscratch1, Address(sp, ld_off));

 469 
  470         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, or T_LONG
 471         // T_DOUBLE and T_LONG use two slots in the interpreter
 472         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 473           // ld_off == LSW, ld_off+wordSize == MSW
 474           // st_off == MSW, next_off == LSW
  475           __ str(rscratch1, Address(sp, next_off));
 476 #ifdef ASSERT
 477           // Overwrite the unused slot with known junk
 478           __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaaaull);
 479           __ str(rscratch1, Address(sp, st_off));
 480 #endif /* ASSERT */
 481         } else {
 482           __ str(rscratch1, Address(sp, st_off));
 483         }
 484       }
 485     } else if (r_1->is_Register()) {
 486       Register r = r_1->as_Register();
 487       if (!r_2->is_valid()) {
  488         // must be only an int (or less) so move only 32 bits to the slot
 489         // why not sign extend??
 490         __ str(r, Address(sp, st_off));
 491       } else {
  492         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, or T_LONG
 493         // T_DOUBLE and T_LONG use two slots in the interpreter
 494         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 495           // jlong/double in gpr
 496 #ifdef ASSERT
 497           // Overwrite the unused slot with known junk
 498           __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaabull);
 499           __ str(rscratch1, Address(sp, st_off));
 500 #endif /* ASSERT */
  501           __ str(r, Address(sp, next_off));
 502         } else {
  503           __ str(r, Address(sp, st_off));
 504         }
 505       }
 506     } else {
 507       assert(r_1->is_FloatRegister(), "");
 508       if (!r_2->is_valid()) {
  509         // only a float, use just part of the slot
 510         __ strs(r_1->as_FloatRegister(), Address(sp, st_off));
 511       } else {
 512 #ifdef ASSERT
 513         // Overwrite the unused slot with known junk
 514         __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaacull);
 515         __ str(rscratch1, Address(sp, st_off));
 516 #endif /* ASSERT */
 517         __ strd(r_1->as_FloatRegister(), Address(sp, next_off));
 518       }
 519     }
 520   }
 521 
 522   __ mov(esp, sp); // Interp expects args on caller's expression stack
 523 
 524   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
 525   __ br(rscratch1);
 526 }
 527 

 528 
 529 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
 530                                     int total_args_passed,
 531                                     int comp_args_on_stack,
 532                                     const BasicType *sig_bt,
 533                                     const VMRegPair *regs) {
 534 
 535   // Note: r19_sender_sp contains the senderSP on entry. We must
 536   // preserve it since we may do a i2c -> c2i transition if we lose a
 537   // race where compiled code goes non-entrant while we get args
 538   // ready.
 539 
 540   // Adapters are frameless.
 541 
 542   // An i2c adapter is frameless because the *caller* frame, which is
 543   // interpreted, routinely repairs its own esp (from
 544   // interpreter_frame_last_sp), even if a callee has modified the
 545   // stack pointer.  It also recalculates and aligns sp.
 546 
 547   // A c2i adapter is frameless because the *callee* frame, which is
 548   // interpreted, routinely repairs its caller's sp (from sender_sp,
 549   // which is set up via the senderSP register).
 550 
 551   // In other words, if *either* the caller or callee is interpreted, we can
 552   // get the stack pointer repaired after a call.
 553 

 576       range_check(masm, rax, r11,
 577                   StubRoutines::initial_stubs_code()->code_begin(),
 578                   StubRoutines::initial_stubs_code()->code_end(),
 579                   L_ok);
 580     }
 581     if (StubRoutines::final_stubs_code() != nullptr) {
 582       range_check(masm, rax, r11,
 583                   StubRoutines::final_stubs_code()->code_begin(),
 584                   StubRoutines::final_stubs_code()->code_end(),
 585                   L_ok);
 586     }
 587     const char* msg = "i2c adapter must return to an interpreter frame";
 588     __ block_comment(msg);
 589     __ stop(msg);
 590     __ bind(L_ok);
 591     __ block_comment("} verify_i2ce ");
 592 #endif
 593   }
 594 
 595   // Cut-out for having no stack args.
 596   int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
 597   if (comp_args_on_stack) {
 598     __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
 599     __ andr(sp, rscratch1, -16);
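    // Worked example (a sketch, assuming VMRegImpl::stack_slot_size == 4 and
    // wordSize == 8): with comp_args_on_stack == 5, comp_words_on_stack is
    // align_up(20, 8) >> 3 == 3, so sp drops by 24 bytes and the andr with -16
    // then rounds it down to the next 16-byte boundary.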

 600   }
 601 
 602   // Will jump to the compiled code just as if compiled code was doing it.
 603   // Pre-load the register-jump target early, to schedule it better.
 604   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));
 605 
 606 #if INCLUDE_JVMCI
 607   if (EnableJVMCI) {
 608     // check if this call should be routed towards a specific entry point
 609     __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 610     Label no_alternative_target;
 611     __ cbz(rscratch2, no_alternative_target);
 612     __ mov(rscratch1, rscratch2);
 613     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 614     __ bind(no_alternative_target);
 615   }
 616 #endif // INCLUDE_JVMCI
 617 


 618   // Now generate the shuffle code.
 619   for (int i = 0; i < total_args_passed; i++) {
 620     if (sig_bt[i] == T_VOID) {
 621       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");

 622       continue;
 623     }
 624 
 625     // Pick up 0, 1 or 2 words from SP+offset.

 626 
 627     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
 628             "scrambled load targets?");
 629     // Load in argument order going down.
 630     int ld_off = (total_args_passed - i - 1)*Interpreter::stackElementSize;
 631     // Point to interpreter value (vs. tag)
 632     int next_off = ld_off - Interpreter::stackElementSize;
 633     //
 634     //
 635     //
 636     VMReg r_1 = regs[i].first();
 637     VMReg r_2 = regs[i].second();
 638     if (!r_1->is_valid()) {
 639       assert(!r_2->is_valid(), "");
 640       continue;
 641     }
 642     if (r_1->is_stack()) {
 643       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 644       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
 645       if (!r_2->is_valid()) {
 646         // sign extend???
 647         __ ldrsw(rscratch2, Address(esp, ld_off));
 648         __ str(rscratch2, Address(sp, st_off));
 649       } else {
 650         //
 651         // We are using two optoregs. This can be either T_OBJECT,
  652         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
  653         // two slots but only uses one for the T_LONG or T_DOUBLE case,
  654         // so we must adjust where to pick up the data to match the
 655         // interpreter.
 656         //
 657         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
 658         // are accessed as negative so LSW is at LOW address
 659 
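        // Worked example (a sketch, assuming stackElementSize == 8): for a
        // signature {T_OBJECT, T_LONG, T_VOID} with total_args_passed == 3, the
        // T_LONG at i == 1 has ld_off == (3 - 1 - 1) * 8 == 8 and next_off == 0,
        // so the 64-bit value is loaded from esp + next_off, the lower of its
        // two interpreter slots.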
 660         // ld_off is MSW so get LSW
 661         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 662                            next_off : ld_off;
 663         __ ldr(rscratch2, Address(esp, offset));
 664         // st_off is LSW (i.e. reg.first())
 665         __ str(rscratch2, Address(sp, st_off));
 666       }
 667     } else if (r_1->is_Register()) {  // Register argument
 668       Register r = r_1->as_Register();
 669       if (r_2->is_valid()) {
 670         //
 671         // We are using two VMRegs. This can be either T_OBJECT,
  672         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
  673         // two slots but only uses one for the T_LONG or T_DOUBLE case,
  674         // so we must adjust where to pick up the data to match the
 675         // interpreter.
 676 
 677         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
  678                            next_off : ld_off;
 679 
 680         // this can be a misaligned move
 681         __ ldr(r, Address(esp, offset));
 682       } else {
 683         // sign extend and use a full word?
 684         __ ldrw(r, Address(esp, ld_off));
 685       }
 686     } else {
 687       if (!r_2->is_valid()) {
 688         __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
 689       } else {
 690         __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
 691       }
 692     }
 693   }
 694 
 695   __ mov(rscratch2, rscratch1);
 696   __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
 697   __ mov(rscratch1, rscratch2);
 698 
 699   // 6243940 We might end up in handle_wrong_method if
 700   // the callee is deoptimized as we race thru here. If that
 701   // happens we don't want to take a safepoint because the
 702   // caller frame will look interpreted and arguments are now
 703   // "compiled" so it is much better to make this transition
 704   // invisible to the stack walking code. Unfortunately if
 705   // we try and find the callee by normal means a safepoint
 706   // is possible. So we stash the desired callee in the thread
  707   // and the VM will find it there should this case occur.
 708 
 709   __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
 710 
 711   __ br(rscratch1);
 712 }
 713 
 714 // ---------------------------------------------------------------
 715 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 716                                                             int total_args_passed,
 717                                                             int comp_args_on_stack,
 718                                                             const BasicType *sig_bt,
 719                                                             const VMRegPair *regs,
 720                                                             AdapterFingerPrint* fingerprint) {
 721   address i2c_entry = __ pc();
 722 
 723   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);






 724 
 725   address c2i_unverified_entry = __ pc();
  726   Label skip_fixup;
 727 
 728   Register data = rscratch2;
 729   Register receiver = j_rarg0;
 730   Register tmp = r10;  // A call-clobbered register not used for arg passing
 731 
 732   // -------------------------------------------------------------------------
 733   // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
 734   // to the interpreter.  The args start out packed in the compiled layout.  They
 735   // need to be unpacked into the interpreter layout.  This will almost always
 736   // require some stack space.  We grow the current (compiled) stack, then repack
 737   // the args.  We  finally end in a jump to the generic interpreter entry point.
 738   // On exit from the interpreter, the interpreter will restore our SP (lest the
 739   // compiled code, which relies solely on SP and not FP, get sick).
 740 
 741   {
 742     __ block_comment("c2i_unverified_entry {");
 743     // Method might have been compiled since the call site was patched to
 744     // interpreted; if that is the case treat it as a miss so we can get
 745     // the call site corrected.
 746     __ ic_check(1 /* end_alignment */);
 747     __ ldr(rmethod, Address(data, CompiledICData::speculated_method_offset()));
 748 
 749     __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 750     __ cbz(rscratch1, skip_fixup);
 751     __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 752     __ block_comment("} c2i_unverified_entry");
 753   }
 754 
 755   address c2i_entry = __ pc();


 756 
 757   // Class initialization barrier for static methods
 758   address c2i_no_clinit_check_entry = nullptr;
 759   if (VM_Version::supports_fast_class_init_checks()) {
 760     Label L_skip_barrier;
 761 
 762     { // Bypass the barrier for non-static methods
 763       __ ldrw(rscratch1, Address(rmethod, Method::access_flags_offset()));
 764       __ andsw(zr, rscratch1, JVM_ACC_STATIC);
 765       __ br(Assembler::EQ, L_skip_barrier); // non-static
 766     }
 767 
 768     __ load_method_holder(rscratch2, rmethod);
 769     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
 770     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 771 
 772     __ bind(L_skip_barrier);
  773     c2i_no_clinit_check_entry = __ pc();
 774   }
 775 
 776   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 777   bs->c2i_entry_barrier(masm);
 778 
 779   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);





 780 
 781   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
 782 }
 783 
 784 static int c_calling_convention_priv(const BasicType *sig_bt,
 785                                          VMRegPair *regs,
 786                                          int total_args_passed) {
 787 
  788 // We return the number of VMRegImpl stack slots we need to reserve for all
 789 // the arguments NOT counting out_preserve_stack_slots.
 790 
 791     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
 792       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5,  c_rarg6,  c_rarg7
 793     };
 794     static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
 795       c_farg0, c_farg1, c_farg2, c_farg3,
 796       c_farg4, c_farg5, c_farg6, c_farg7
 797     };
 798 
 799     uint int_args = 0;
 800     uint fp_args = 0;
 801     uint stk_args = 0; // inc by 2 each time

1774   if (method->is_synchronized()) {
1775     Label count;
1776     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1777 
1778     // Get the handle (the 2nd argument)
1779     __ mov(oop_handle_reg, c_rarg1);
1780 
1781     // Get address of the box
1782 
1783     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1784 
1785     // Load the oop from the handle
1786     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1787 
1788     if (LockingMode == LM_MONITOR) {
1789       __ b(slow_path_lock);
1790     } else if (LockingMode == LM_LEGACY) {
1791       // Load (object->mark() | 1) into swap_reg %r0
1792       __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1793       __ orr(swap_reg, rscratch1, 1);




1794 
1795       // Save (object->mark() | 1) into BasicLock's displaced header
1796       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1797 
1798       // src -> dest iff dest == r0 else r0 <- dest
1799       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
1800 
1801       // Hmm should this move to the slow path code area???
1802 
1803       // Test if the oopMark is an obvious stack pointer, i.e.,
1804       //  1) (mark & 3) == 0, and
1805       //  2) sp <= mark < mark + os::pagesize()
1806       // These 3 tests can be done by evaluating the following
1807       // expression: ((mark - sp) & (3 - os::vm_page_size())),
1808       // assuming both stack pointer and pagesize have their
1809       // least significant 2 bits clear.
1810       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1811 
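      // Worked example (a sketch, assuming os::vm_page_size() == 4096): the
      // mask 3 - 4096 is 0x...fffff003, so ((mark - sp) & (3 - page_size)) == 0
      // holds exactly when mark - sp lies in [0, 4096) and its low two bits are
      // clear, i.e. when tests 1) and 2) above both pass.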
1812       __ sub(swap_reg, sp, swap_reg);
1813       __ neg(swap_reg, swap_reg);

2761   __ bind(pending);
2762 
2763   reg_save.restore_live_registers(masm);
2764 
2765   // exception pending => remove activation and forward to exception handler
2766 
2767   __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
2768 
2769   __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
2770   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2771 
2772   // -------------
2773   // make sure all code is generated
2774   masm->flush();
2775 
2776   // return the  blob
2777   // frame_size_words or bytes??
2778   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
2779 }
 2780 
2781 // Continuation point for throwing of implicit exceptions that are
2782 // not handled in the current activation. Fabricates an exception
2783 // oop and initiates normal exception dispatching in this
2784 // frame. Since we need to preserve callee-saved values (currently
2785 // only for C2, but done for C1 as well) we need a callee-saved oop
2786 // map and therefore have to make these stubs into RuntimeStubs
2787 // rather than BufferBlobs.  If the compiler needs all registers to
2788 // be preserved between the fault point and the exception handler
2789 // then it must assume responsibility for that in
2790 // AbstractCompiler::continuation_for_implicit_null_exception or
2791 // continuation_for_implicit_division_by_zero_exception. All other
2792 // implicit exceptions (e.g., NullPointerException or
2793 // AbstractMethodError on entry) are either at call sites or
2794 // otherwise assume that stack unwinding will be initiated, so
2795 // caller saved registers were assumed volatile in the compiler.
2796 
2797 RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) {
2798   assert(is_throw_id(id), "expected a throw stub id");
2799 
2800   const char* name = SharedRuntime::stub_name(id);

  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "asm/macroAssembler.hpp"
  29 #include "asm/macroAssembler.inline.hpp"
  30 #include "classfile/symbolTable.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "code/compiledIC.hpp"
  33 #include "code/debugInfoRec.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/oopMap.hpp"
  36 #include "gc/shared/barrierSetAssembler.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "interpreter/interp_masm.hpp"
  39 #include "logging/log.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "nativeInst_aarch64.hpp"
  42 #include "oops/klass.inline.hpp"
  43 #include "oops/method.inline.hpp"
  44 #include "prims/methodHandles.hpp"
  45 #include "runtime/continuation.hpp"
  46 #include "runtime/continuationEntry.inline.hpp"
  47 #include "runtime/globals.hpp"
  48 #include "runtime/jniHandles.hpp"
  49 #include "runtime/safepointMechanism.hpp"
  50 #include "runtime/sharedRuntime.hpp"

 343       break;
 344     case T_DOUBLE:
 345       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 346       if (fp_args < Argument::n_float_register_parameters_j) {
 347         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 348       } else {
 349         stk_args = align_up(stk_args, 2);
 350         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 351         stk_args += 2;
 352       }
 353       break;
 354     default:
 355       ShouldNotReachHere();
 356       break;
 357     }
 358   }
 359 
 360   return stk_args;
 361 }
 362 
 363 
 364 const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j;
 365 const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
 366 
 367 int SharedRuntime::java_return_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) {
 368 
 369   // Create the mapping between argument positions and registers.
 370 
 371   static const Register INT_ArgReg[java_return_convention_max_int] = {
 372     r0 /* j_rarg7 */, j_rarg6, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0
 373   };
 374 
 375   static const FloatRegister FP_ArgReg[java_return_convention_max_float] = {
 376     j_farg0, j_farg1, j_farg2, j_farg3, j_farg4, j_farg5, j_farg6, j_farg7
 377   };
 378 
 379   uint int_args = 0;
 380   uint fp_args = 0;
 381 
 382   for (int i = 0; i < total_args_passed; i++) {
 383     switch (sig_bt[i]) {
 384     case T_BOOLEAN:
 385     case T_CHAR:
 386     case T_BYTE:
 387     case T_SHORT:
 388     case T_INT:
 389       if (int_args < SharedRuntime::java_return_convention_max_int) {
 390         regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
 391         int_args ++;
 392       } else {
 393         return -1;
 394       }
 395       break;
 396     case T_VOID:
 397       // halves of T_LONG or T_DOUBLE
 398       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 399       regs[i].set_bad();
 400       break;
 401     case T_LONG:
 402       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 403       // fall through
 404     case T_OBJECT:
 405     case T_ARRAY:
 406     case T_ADDRESS:
 407       // Should T_METADATA be added to java_calling_convention as well ?
 408     case T_METADATA:
 409       if (int_args < SharedRuntime::java_return_convention_max_int) {
 410         regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
 411         int_args ++;
 412       } else {
 413         return -1;
 414       }
 415       break;
 416     case T_FLOAT:
 417       if (fp_args < SharedRuntime::java_return_convention_max_float) {
 418         regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
 419         fp_args ++;
 420       } else {
 421         return -1;
 422       }
 423       break;
 424     case T_DOUBLE:
 425       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 426       if (fp_args < SharedRuntime::java_return_convention_max_float) {
 427         regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
 428         fp_args ++;
 429       } else {
 430         return -1;
 431       }
 432       break;
 433     default:
 434       ShouldNotReachHere();
 435       break;
 436     }
 437   }
 438 
 439   return int_args + fp_args;
 440 }
 441 
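// Worked example (a sketch of the mapping built by java_return_convention
// above): for sig_bt == {T_INT, T_LONG, T_VOID, T_FLOAT}, regs[0] gets r0,
// regs[1] gets the j_rarg6 pair, regs[2] is set_bad() for the long's second
// half, and regs[3] gets j_farg0; the function returns 2 + 1 == 3. If more
// values are requested than INT_ArgReg/FP_ArgReg can hold, it returns -1.
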
 442 // Patch the caller's callsite with the entry to compiled code if it exists.
 443 static void patch_callers_callsite(MacroAssembler *masm) {
 444   Label L;
 445   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 446   __ cbz(rscratch1, L);
 447 
 448   __ enter();
 449   __ push_CPU_state();
 450 
 451   // VM needs caller's callsite
 452   // VM needs target method
 453   // This needs to be a long call since we will relocate this adapter to
 454   // the codeBuffer and it may not reach
 455 
 456 #ifndef PRODUCT
 457   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
 458 #endif
 459 
 460   __ mov(c_rarg0, rmethod);
 461   __ mov(c_rarg1, lr);
 462   __ authenticate_return_address(c_rarg1);
 463   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 464   __ blr(rscratch1);
 465 
 466   // Explicit isb required because fixup_callers_callsite may change the code
 467   // stream.
 468   __ safepoint_isb();
 469 
 470   __ pop_CPU_state();
 471   // restore sp
 472   __ leave();
 473   __ bind(L);
 474 }
 475 
 476 // For each inline type argument, sig includes the list of fields of
 477 // the inline type. This utility function computes the number of
 478 // arguments for the call if inline types are passed by reference (the
 479 // calling convention the interpreter expects).
 480 static int compute_total_args_passed_int(const GrowableArray<SigEntry>* sig_extended) {
 481   int total_args_passed = 0;
 482   if (InlineTypePassFieldsAsArgs) {
 483      for (int i = 0; i < sig_extended->length(); i++) {
 484        BasicType bt = sig_extended->at(i)._bt;
 485        if (bt == T_METADATA) {
 486          // In sig_extended, an inline type argument starts with:
 487          // T_METADATA, followed by the types of the fields of the
 488          // inline type and T_VOID to mark the end of the value
 489          // type. Inline types are flattened so, for instance, in the
 490          // case of an inline type with an int field and an inline type
 491          // field that itself has 2 fields, an int and a long:
 492          // T_METADATA T_INT T_METADATA T_INT T_LONG T_VOID (second
 493          // slot for the T_LONG) T_VOID (inner inline type) T_VOID
 494          // (outer inline type)
 495          total_args_passed++;
 496          int vt = 1;
 497          do {
 498            i++;
 499            BasicType bt = sig_extended->at(i)._bt;
 500            BasicType prev_bt = sig_extended->at(i-1)._bt;
 501            if (bt == T_METADATA) {
 502              vt++;
 503            } else if (bt == T_VOID &&
 504                       prev_bt != T_LONG &&
 505                       prev_bt != T_DOUBLE) {
 506              vt--;
 507            }
 508          } while (vt != 0);
 509        } else {
 510          total_args_passed++;
 511        }
 512      }
 513   } else {
 514     total_args_passed = sig_extended->length();
 515   }
 516 
 517   return total_args_passed;
 518 }
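
// Worked example (a sketch using the flattened signature described above): for
// sig_extended == {T_METADATA, T_INT, T_METADATA, T_INT, T_LONG, T_VOID, T_VOID,
// T_VOID}, the do/while consumes the whole run until vt drops back to 0, so the
// outer inline type contributes exactly 1 to total_args_passed; a plain
// {T_LONG, T_VOID} pair, by contrast, contributes 2 because both entries take
// the else branch.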
 519 
 520 
 521 static void gen_c2i_adapter_helper(MacroAssembler* masm,
 522                                    BasicType bt,
 523                                    BasicType prev_bt,
 524                                    size_t size_in_bytes,
 525                                    const VMRegPair& reg_pair,
 526                                    const Address& to,
 527                                    Register tmp1,
 528                                    Register tmp2,
 529                                    Register tmp3,
 530                                    int extraspace,
 531                                    bool is_oop) {
 532   if (bt == T_VOID) {
 533     assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
 534     return;
 535   }
 536 
 537   // Say 4 args:
 538   // i   st_off
 539   // 0   32 T_LONG
 540   // 1   24 T_VOID
 541   // 2   16 T_OBJECT
 542   // 3    8 T_BOOL
 543   // -    0 return address
 544   //
  545   // However, to make things extra confusing: because we can fit a Java long/double in
  546   // a single slot on a 64-bit VM, and it would be silly to break them up, the interpreter
  547   // leaves one slot empty and only stores to a single slot. In this case the
  548   // slot that is occupied is the T_VOID slot. See, I said it was confusing.
 549 
 550   bool wide = (size_in_bytes == wordSize);
 551   VMReg r_1 = reg_pair.first();
 552   VMReg r_2 = reg_pair.second();
 553   assert(r_2->is_valid() == wide, "invalid size");
 554   if (!r_1->is_valid()) {
 555     assert(!r_2->is_valid(), "");
 556     return;
 557   }
 558 
 559   if (!r_1->is_FloatRegister()) {
 560     Register val = r25;
 561     if (r_1->is_stack()) {
  562       // memory to memory, use r25 (scratch registers are used by store_heap_oop)
 563       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 564       __ load_sized_value(val, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
 565     } else {
 566       val = r_1->as_Register();
 567     }
 568     assert_different_registers(to.base(), val, tmp1, tmp2, tmp3);
 569     if (is_oop) {
 570       __ store_heap_oop(to, val, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
 571     } else {
 572       __ store_sized_value(to, val, size_in_bytes);
 573     }
 574   } else {
 575     if (wide) {
 576       __ strd(r_1->as_FloatRegister(), to);
 577     } else {
  578       // only a float, use just part of the slot
 579       __ strs(r_1->as_FloatRegister(), to);
 580     }
 581   }
 582 }
 583 
 584 static void gen_c2i_adapter(MacroAssembler *masm,
 585                             const GrowableArray<SigEntry>* sig_extended,


 586                             const VMRegPair *regs,
 587                             bool requires_clinit_barrier,
 588                             address& c2i_no_clinit_check_entry,
 589                             Label& skip_fixup,
 590                             address start,
 591                             OopMapSet* oop_maps,
 592                             int& frame_complete,
 593                             int& frame_size_in_words,
 594                             bool alloc_inline_receiver) {
 595   if (requires_clinit_barrier && VM_Version::supports_fast_class_init_checks()) {
 596     Label L_skip_barrier;
 597 
 598     { // Bypass the barrier for non-static methods
 599       __ ldrw(rscratch1, Address(rmethod, Method::access_flags_offset()));
 600       __ andsw(zr, rscratch1, JVM_ACC_STATIC);
 601       __ br(Assembler::EQ, L_skip_barrier); // non-static
 602     }
 603 
 604     __ load_method_holder(rscratch2, rmethod);
 605     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
 606     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 607 
 608     __ bind(L_skip_barrier);
 609     c2i_no_clinit_check_entry = __ pc();
 610   }
 611 
 612   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 613   bs->c2i_entry_barrier(masm);
 614 
 615   // Before we get into the guts of the C2I adapter, see if we should be here
 616   // at all.  We've come from compiled code and are attempting to jump to the
 617   // interpreter, which means the caller made a static call to get here
 618   // (vcalls always get a compiled target if there is one).  Check for a
 619   // compiled target.  If there is one, we need to patch the caller's call.
 620   patch_callers_callsite(masm);
 621 
 622   __ bind(skip_fixup);
 623 
 624   // Name some registers to be used in the following code. We can use
 625   // anything except r0-r7 which are arguments in the Java calling
 626   // convention, rmethod (r12), and r13 which holds the outgoing sender
 627   // SP for the interpreter.
 628   Register buf_array = r10;   // Array of buffered inline types
 629   Register buf_oop = r11;     // Buffered inline type oop
 630   Register tmp1 = r15;
 631   Register tmp2 = r16;
 632   Register tmp3 = r17;
 633 
 634   if (InlineTypePassFieldsAsArgs) {
 635     // Is there an inline type argument?
 636     bool has_inline_argument = false;
 637     for (int i = 0; i < sig_extended->length() && !has_inline_argument; i++) {
 638       has_inline_argument = (sig_extended->at(i)._bt == T_METADATA);
 639     }
 640     if (has_inline_argument) {
 641       // There is at least an inline type argument: we're coming from
 642       // compiled code so we have no buffers to back the inline types
 643       // Allocate the buffers here with a runtime call.
 644       RegisterSaver reg_save(false /* save_vectors */);
 645       OopMap* map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
 646 
 647       frame_complete = __ offset();
 648       address the_pc = __ pc();
 649 
 650       Label retaddr;
 651       __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
 652 
 653       __ mov(c_rarg0, rthread);
 654       __ mov(c_rarg1, rmethod);
 655       __ mov(c_rarg2, (int64_t)alloc_inline_receiver);
 656 
 657       __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_inline_types)));
 658       __ blr(rscratch1);
 659       __ bind(retaddr);
 660 
 661       oop_maps->add_gc_map(__ pc() - start, map);
 662       __ reset_last_Java_frame(false);
 663 
 664       reg_save.restore_live_registers(masm);





 665 
 666       Label no_exception;
 667       __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  668       __ cbz(rscratch1, no_exception);
 669 
 670       __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
 671       __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
 672       __ b(RuntimeAddress(StubRoutines::forward_exception_entry()));
 673 
 674       __ bind(no_exception);
 675 
 676       // We get an array of objects from the runtime call
 677       __ get_vm_result(buf_array, rthread);
 678       __ get_vm_result_2(rmethod, rthread); // TODO: required to keep the callee Method live?
 679     }
  680   }
 681 
 682   // Since all args are passed on the stack, total_args_passed *
 683   // Interpreter::stackElementSize is the space we need.
 684 
 685   int total_args_passed = compute_total_args_passed_int(sig_extended);
 686   int extraspace = total_args_passed * Interpreter::stackElementSize;
 687 
 688   // stack is aligned, keep it that way
 689   extraspace = align_up(extraspace, StackAlignmentInBytes);
 690 
 691   // set senderSP value
 692   __ mov(r19_sender_sp, sp);
 693 
 694   __ sub(sp, sp, extraspace);
 695 
 696   // Now write the args into the outgoing interpreter space
 697 
 698   // next_arg_comp is the next argument from the compiler point of
 699   // view (inline type fields are passed in registers/on the stack). In
 700   // sig_extended, an inline type argument starts with: T_METADATA,
 701   // followed by the types of the fields of the inline type and T_VOID
 702   // to mark the end of the inline type. ignored counts the number of
 703   // T_METADATA/T_VOID. next_vt_arg is the next inline type argument:
 704   // used to get the buffer for that argument from the pool of buffers
 705   // we allocated above and want to pass to the
 706   // interpreter. next_arg_int is the next argument from the
 707   // interpreter point of view (inline types are passed by reference).
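  // Worked example (a sketch): for sig_extended == {T_INT, T_METADATA, T_INT,
  // T_VOID} (a plain int followed by an inline type with one int field whose
  // offset is valid), the loop writes the plain int, loads the buffer for the
  // inline type, writes the field into that buffer, and finally stores the
  // buffer oop; it ends with ignored == 2 (the T_METADATA and the closing
  // T_VOID), next_arg_int == 2 and next_vt_arg == 1, so only two entries of
  // regs (the compiled-convention values) are consumed.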
 708   for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
 709        next_arg_comp < sig_extended->length(); next_arg_comp++) {
 710     assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
 711     assert(next_arg_int <= total_args_passed, "more arguments for the interpreter than expected?");
 712     BasicType bt = sig_extended->at(next_arg_comp)._bt;
 713     int st_off = (total_args_passed - next_arg_int - 1) * Interpreter::stackElementSize;
 714     if (!InlineTypePassFieldsAsArgs || bt != T_METADATA) {
 715       int next_off = st_off - Interpreter::stackElementSize;
 716       const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
 717       const VMRegPair reg_pair = regs[next_arg_comp-ignored];
 718       size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
 719       gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
 720                              size_in_bytes, reg_pair, Address(sp, offset), tmp1, tmp2, tmp3, extraspace, false);
 721       next_arg_int++;
 722 #ifdef ASSERT
 723       if (bt == T_LONG || bt == T_DOUBLE) {
 724         // Overwrite the unused slot with known junk
 725         __ mov(rscratch1, CONST64(0xdeadffffdeadaaaa));
  726         __ str(rscratch1, Address(sp, st_off));
  727       }
 728 #endif /* ASSERT */
 729     } else {
 730       ignored++;
 731       // get the buffer from the just allocated pool of buffers
 732       int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_OBJECT);
 733       __ load_heap_oop(buf_oop, Address(buf_array, index), tmp1, tmp2);
 734       next_vt_arg++; next_arg_int++;
 735       int vt = 1;
 736       // write fields we get from compiled code in registers/stack
 737       // slots to the buffer: we know we are done with that inline type
 738       // argument when we hit the T_VOID that acts as an end of inline
 739       // type delimiter for this inline type. Inline types are flattened
 740       // so we might encounter embedded inline types. Each entry in
 741       // sig_extended contains a field offset in the buffer.
 742       Label L_null;
 743       do {
 744         next_arg_comp++;
 745         BasicType bt = sig_extended->at(next_arg_comp)._bt;
 746         BasicType prev_bt = sig_extended->at(next_arg_comp - 1)._bt;
 747         if (bt == T_METADATA) {
 748           vt++;
 749           ignored++;
 750         } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
 751           vt--;
 752           ignored++;
 753         } else {
 754           int off = sig_extended->at(next_arg_comp)._offset;
 755           if (off == -1) {
 756             // Nullable inline type argument, emit null check
 757             VMReg reg = regs[next_arg_comp-ignored].first();
 758             Label L_notNull;
 759             if (reg->is_stack()) {
 760               int ld_off = reg->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 761               __ ldrb(tmp1, Address(sp, ld_off));
 762               __ cbnz(tmp1, L_notNull);
 763             } else {
 764               __ cbnz(reg->as_Register(), L_notNull);
 765             }
 766             __ str(zr, Address(sp, st_off));
 767             __ b(L_null);
 768             __ bind(L_notNull);
 769             continue;
 770           }
 771           assert(off > 0, "offset in object should be positive");
 772           size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 773           bool is_oop = is_reference_type(bt);
 774           gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
 775                                  size_in_bytes, regs[next_arg_comp-ignored], Address(buf_oop, off), tmp1, tmp2, tmp3, extraspace, is_oop);
 776         }
 777       } while (vt != 0);
 778       // pass the buffer to the interpreter
 779       __ str(buf_oop, Address(sp, st_off));
  780       __ bind(L_null);
 781     }
 782   }
 783 
 784   __ mov(esp, sp); // Interp expects args on caller's expression stack
 785 
 786   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
 787   __ br(rscratch1);
 788 }
 789 
 790 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, int comp_args_on_stack, const GrowableArray<SigEntry>* sig, const VMRegPair *regs) {
 791 





 792 
 793   // Note: r19_sender_sp contains the senderSP on entry. We must
 794   // preserve it since we may do a i2c -> c2i transition if we lose a
 795   // race where compiled code goes non-entrant while we get args
 796   // ready.
 797 
 798   // Adapters are frameless.
 799 
 800   // An i2c adapter is frameless because the *caller* frame, which is
 801   // interpreted, routinely repairs its own esp (from
 802   // interpreter_frame_last_sp), even if a callee has modified the
 803   // stack pointer.  It also recalculates and aligns sp.
 804 
 805   // A c2i adapter is frameless because the *callee* frame, which is
 806   // interpreted, routinely repairs its caller's sp (from sender_sp,
 807   // which is set up via the senderSP register).
 808 
 809   // In other words, if *either* the caller or callee is interpreted, we can
 810   // get the stack pointer repaired after a call.
 811 

 834       range_check(masm, rax, r11,
 835                   StubRoutines::initial_stubs_code()->code_begin(),
 836                   StubRoutines::initial_stubs_code()->code_end(),
 837                   L_ok);
 838     }
 839     if (StubRoutines::final_stubs_code() != nullptr) {
 840       range_check(masm, rax, r11,
 841                   StubRoutines::final_stubs_code()->code_begin(),
 842                   StubRoutines::final_stubs_code()->code_end(),
 843                   L_ok);
 844     }
 845     const char* msg = "i2c adapter must return to an interpreter frame";
 846     __ block_comment(msg);
 847     __ stop(msg);
 848     __ bind(L_ok);
 849     __ block_comment("} verify_i2ce ");
 850 #endif
 851   }
 852 
 853   // Cut-out for having no stack args.
 854   int comp_words_on_stack = 0;
 855   if (comp_args_on_stack) {
 856      comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord;
 857      __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
 858      __ andr(sp, rscratch1, -16);
 859   }
 860 
 861   // Will jump to the compiled code just as if compiled code was doing it.
 862   // Pre-load the register-jump target early, to schedule it better.
 863   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_inline_offset())));
 864 
 865 #if INCLUDE_JVMCI
 866   if (EnableJVMCI) {
 867     // check if this call should be routed towards a specific entry point
 868     __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 869     Label no_alternative_target;
 870     __ cbz(rscratch2, no_alternative_target);
 871     __ mov(rscratch1, rscratch2);
 872     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 873     __ bind(no_alternative_target);
 874   }
 875 #endif // INCLUDE_JVMCI
 876 
 877   int total_args_passed = sig->length();
 878 
 879   // Now generate the shuffle code.
 880   for (int i = 0; i < total_args_passed; i++) {
 881     BasicType bt = sig->at(i)._bt;
 882     if (bt == T_VOID) {
 883       assert(i > 0 && (sig->at(i - 1)._bt == T_LONG || sig->at(i - 1)._bt == T_DOUBLE), "missing half");
 884       continue;
 885     }
 886 
 887     // Pick up 0, 1 or 2 words from SP+offset.
 888     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?");
 889 


 890     // Load in argument order going down.
 891     int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
 892     // Point to interpreter value (vs. tag)
 893     int next_off = ld_off - Interpreter::stackElementSize;
 894     //
 895     //
 896     //
 897     VMReg r_1 = regs[i].first();
 898     VMReg r_2 = regs[i].second();
 899     if (!r_1->is_valid()) {
 900       assert(!r_2->is_valid(), "");
 901       continue;
 902     }
 903     if (r_1->is_stack()) {
 904       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 905       int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size;
 906       if (!r_2->is_valid()) {
 907         // sign extend???
 908         __ ldrsw(rscratch2, Address(esp, ld_off));
 909         __ str(rscratch2, Address(sp, st_off));
 910       } else {
 911         //
 912         // We are using two optoregs. This can be either T_OBJECT,
  913         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
  914         // two slots but only uses one for the T_LONG or T_DOUBLE case,
  915         // so we must adjust where to pick up the data to match the
 916         // interpreter.
 917         //
 918         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
 919         // are accessed as negative so LSW is at LOW address
 920 
 921         // ld_off is MSW so get LSW
 922         const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;

 923         __ ldr(rscratch2, Address(esp, offset));
 924         // st_off is LSW (i.e. reg.first())
 925          __ str(rscratch2, Address(sp, st_off));
 926        }
 927      } else if (r_1->is_Register()) {  // Register argument
 928        Register r = r_1->as_Register();
 929        if (r_2->is_valid()) {
 930          //
 931          // We are using two VMRegs. This can be either T_OBJECT,
  932          // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
  933          // two slots but only uses one for the T_LONG or T_DOUBLE case,
  934          // so we must adjust where to pick up the data to match the
 935          // interpreter.
 936 
 937         const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
 938 
 939          // this can be a misaligned move
 940          __ ldr(r, Address(esp, offset));
 941        } else {
 942          // sign extend and use a full word?
 943          __ ldrw(r, Address(esp, ld_off));
 944        }
 945      } else {
 946        if (!r_2->is_valid()) {
 947          __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
 948        } else {
 949          __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
 950        }
 951      }
 952    }
  953 
 954 
 955   __ mov(rscratch2, rscratch1);
 956   __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
 957   __ mov(rscratch1, rscratch2);
 958 
 959   // 6243940 We might end up in handle_wrong_method if
 960   // the callee is deoptimized as we race thru here. If that
 961   // happens we don't want to take a safepoint because the
 962   // caller frame will look interpreted and arguments are now
 963   // "compiled" so it is much better to make this transition
 964   // invisible to the stack walking code. Unfortunately if
 965   // we try and find the callee by normal means a safepoint
 966   // is possible. So we stash the desired callee in the thread
  967   // and the VM will find it there should this case occur.
 968 
 969   __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));

 970   __ br(rscratch1);
 971 }
 972 
 973 static void gen_inline_cache_check(MacroAssembler *masm, Label& skip_fixup) {
 974   Register data = rscratch2;
 975   __ ic_check(1 /* end_alignment */);
 976   __ ldr(rmethod, Address(data, CompiledICData::speculated_method_offset()));




 977 
 978   // Method might have been compiled since the call site was patched to
 979   // interpreted; if that is the case treat it as a miss so we can get
 980   // the call site corrected.
 981   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 982   __ cbz(rscratch1, skip_fixup);
 983   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 984 }
 985 
 986 // ---------------------------------------------------------------
 987 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,
 988                                                             int comp_args_on_stack,
 989                                                             const GrowableArray<SigEntry>* sig,
 990                                                             const VMRegPair* regs,
 991                                                             const GrowableArray<SigEntry>* sig_cc,
 992                                                             const VMRegPair* regs_cc,
 993                                                             const GrowableArray<SigEntry>* sig_cc_ro,
 994                                                             const VMRegPair* regs_cc_ro,
 995                                                             AdapterFingerPrint* fingerprint,
 996                                                             AdapterBlob*& new_adapter,
 997                                                             bool allocate_code_blob) {
 998 
 999   address i2c_entry = __ pc();
1000   gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);

1001 
1002   // -------------------------------------------------------------------------
1003   // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
1004   // to the interpreter.  The args start out packed in the compiled layout.  They
1005   // need to be unpacked into the interpreter layout.  This will almost always
1006   // require some stack space.  We grow the current (compiled) stack, then repack
1007   // the args.  We  finally end in a jump to the generic interpreter entry point.
1008   // On exit from the interpreter, the interpreter will restore our SP (lest the
1009   // compiled code, which relies solely on SP and not FP, get sick).
1010 
1011   address c2i_unverified_entry        = __ pc();
1012   address c2i_unverified_inline_entry = __ pc();
1013   Label skip_fixup;




1014 
1015   gen_inline_cache_check(masm, skip_fixup);




1016 
1017   OopMapSet* oop_maps = new OopMapSet();
1018   int frame_complete = CodeOffsets::frame_never_safe;
1019   int frame_size_in_words = 0;
1020 
1021   // Scalarized c2i adapter with non-scalarized receiver (i.e., don't pack receiver)
1022   address c2i_no_clinit_check_entry = nullptr;
1023   address c2i_inline_ro_entry = __ pc();
1024   if (regs_cc != regs_cc_ro) {
1025     // No class init barrier needed because method is guaranteed to be non-static
1026     gen_c2i_adapter(masm, sig_cc_ro, regs_cc_ro, /* requires_clinit_barrier = */ false, c2i_no_clinit_check_entry,
1027                     skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1028     skip_fixup.reset();
1029   }





1030 
1031   // Scalarized c2i adapter
1032   address c2i_entry        = __ pc();
1033   address c2i_inline_entry = __ pc();
1034   gen_c2i_adapter(masm, sig_cc, regs_cc, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
1035                   skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ true);
1036 
1037   // Non-scalarized c2i adapter
1038   if (regs != regs_cc) {
1039     c2i_unverified_inline_entry = __ pc();
1040     Label inline_entry_skip_fixup;
1041     gen_inline_cache_check(masm, inline_entry_skip_fixup);
1042 
1043     c2i_inline_entry = __ pc();
1044     gen_c2i_adapter(masm, sig, regs, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
1045                     inline_entry_skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1046   }
1047 


1048 
1049   // The c2i adapter might safepoint and trigger a GC. The caller must make sure that
1050   // the GC knows about the oop argument locations passed to the c2i adapter.
1051   if (allocate_code_blob) {
1052     bool caller_must_gc_arguments = (regs != regs_cc);
1053     new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps, caller_must_gc_arguments);
1054   }
1055 
1056   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry, c2i_unverified_inline_entry, c2i_no_clinit_check_entry);
1057 }
1058 
1059 static int c_calling_convention_priv(const BasicType *sig_bt,
1060                                      VMRegPair *regs,
1061                                      int total_args_passed) {
1062 
1063 // We return the number of VMRegImpl stack slots we need to reserve for all
1064 // the arguments NOT counting out_preserve_stack_slots.
1065 
1066     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1067       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5, c_rarg6, c_rarg7
1068     };
1069     static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1070       c_farg0, c_farg1, c_farg2, c_farg3,
1071       c_farg4, c_farg5, c_farg6, c_farg7
1072     };
1073 
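         // AAPCS64: the first 8 integer and the first 8 floating-point arguments
         // are passed in the registers listed above; the rest go on the stack and
         // are accounted for here in 2-slot (8-byte) units.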
1074     uint int_args = 0;
1075     uint fp_args = 0;
1076     uint stk_args = 0; // inc by 2 each time

2049   if (method->is_synchronized()) {
2050     Label count;
2051     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2052 
2053     // Get the handle (the 2nd argument)
2054     __ mov(oop_handle_reg, c_rarg1);
2055 
2056     // Get address of the box
2057 
2058     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2059 
2060     // Load the oop from the handle
2061     __ ldr(obj_reg, Address(oop_handle_reg, 0));
2062 
2063     if (LockingMode == LM_MONITOR) {
2064       __ b(slow_path_lock);
2065     } else if (LockingMode == LM_LEGACY) {
2066       // Load (object->mark() | 1) into swap_reg (r0)
2067       __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2068       __ orr(swap_reg, rscratch1, 1);
2069       if (EnableValhalla) {
2070         // Mask inline_type bit such that we go to the slow path if object is an inline type
2071         __ andr(swap_reg, swap_reg, ~((int) markWord::inline_type_bit_in_place));
2072       }
2073 
2074       // Save (object->mark() | 1) into BasicLock's displaced header
2075       __ str(swap_reg, Address(lock_reg, mark_word_offset));
2076 
2077       // src -> dest iff dest == r0 else r0 <- dest
2078       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
2079 
2080       // TODO: should this move to the slow-path code area?
2081 
2082       // Test if the oopMark is an obvious stack pointer, i.e.,
2083       //  1) (mark & 3) == 0, and
2084       //  2) sp <= mark < sp + os::vm_page_size()
2085       // These 3 tests (condition 2 is really two comparisons) can be done
2086       // by evaluating the expression ((mark - sp) & (3 - os::vm_page_size())),
2087       // assuming both the stack pointer and the page size have their
2088       // least significant 2 bits clear.
2089       // NOTE: the oopMark is in swap_reg (r0) as the result of cmpxchg
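           // For example, with a 4K page (os::vm_page_size() == 4096):
           // 3 - 4096 == -4093 == 0xffff...f003, so the AND is zero iff bits
           // [1:0] and bits [63:12] of (mark - sp) are all clear, i.e.
           // 0 <= mark - sp < 4096 and mark is 4-byte aligned (given sp is).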
2090 
2091       __ sub(swap_reg, sp, swap_reg);
2092       __ neg(swap_reg, swap_reg);
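           // swap_reg = -(sp - mark) = mark - sp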

3040   __ bind(pending);
3041 
3042   reg_save.restore_live_registers(masm);
3043 
3044   // exception pending => remove activation and forward to exception handler
3045 
3046   __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
3047 
3048   __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
3049   __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3050 
3051   // -------------
3052   // make sure all code is generated
3053   masm->flush();
3054 
3055   // return the blob
3056   // (frame size is given in words, matching frame_size_in_words above)
3057   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
3058 }
3059 
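     // Builds a small blob with three entry points for moving an inline type's
     // field values between registers and a buffered instance:
     //   pack_fields_jobject - resolve the pre-allocated buffer from its JNI
     //                         handle, then fall through to pack_fields
     //   pack_fields         - store the field values from their registers into
     //                         the buffer referenced by r0
     //   unpack_fields       - load the field values from the buffer in r0 back
     //                         into registers (a null r0 skips the loads)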
3060 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
3061   BufferBlob* buf = BufferBlob::create("inline types pack/unpack", 16 * K);
3062   CodeBuffer buffer(buf);
3063   short buffer_locs[20];
3064   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3065                                          sizeof(buffer_locs)/sizeof(relocInfo));
3066 
3067   MacroAssembler _masm(&buffer);
3068   MacroAssembler* masm = &_masm;
3069 
3070   const Array<SigEntry>* sig_vk = vk->extended_sig();
3071   const Array<VMRegPair>* regs = vk->return_regs();
3072 
3073   int pack_fields_jobject_off = __ offset();
3074   // Resolve pre-allocated buffer from JNI handle.
3075   // We cannot do this in generate_call_stub() because it requires GC code to be initialized.
3076   Register Rresult = r14;  // See StubGenerator::generate_call_stub().
3077   __ ldr(r0, Address(Rresult));
3078   __ resolve_jobject(r0 /* value */,
3079                      rthread /* thread */,
3080                      r12 /* tmp */);
3081   __ str(r0, Address(Rresult));
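       // Write the resolved oop back through Rresult, then fall through into the
       // field-packing code below.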
3082 
3083   int pack_fields_off = __ offset();
3084 
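       // Pack: copy each field value from its register into the buffered instance
       // at r0. j starts at 1 because regs->at(0) is assumed to describe the oop
       // of the inline type itself rather than a field.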
3085   int j = 1;
3086   for (int i = 0; i < sig_vk->length(); i++) {
3087     BasicType bt = sig_vk->at(i)._bt;
3088     if (bt == T_METADATA) {
3089       continue;
3090     }
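         // A T_VOID entry marks the unused upper half of a long/double; regs is
         // assumed to carry a matching placeholder slot, so step j past it too.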
3091     if (bt == T_VOID) {
3092       if (sig_vk->at(i-1)._bt == T_LONG ||
3093           sig_vk->at(i-1)._bt == T_DOUBLE) {
3094         j++;
3095       }
3096       continue;
3097     }
3098     int off = sig_vk->at(i)._offset;
3099     VMRegPair pair = regs->at(j);
3100     VMReg r_1 = pair.first();
3101     VMReg r_2 = pair.second();
3102     Address to(r0, off);
3103     if (bt == T_FLOAT) {
3104       __ strs(r_1->as_FloatRegister(), to);
3105     } else if (bt == T_DOUBLE) {
3106       __ strd(r_1->as_FloatRegister(), to);
3107     } else {
3108       Register val = r_1->as_Register();
3109       assert_different_registers(to.base(), val, r15, r16, r17);
3110       if (is_reference_type(bt)) {
3111         __ store_heap_oop(to, val, r15, r16, r17, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
3112       } else {
3113         __ store_sized_value(to, r_1->as_Register(), type2aelembytes(bt));
3114       }
3115     }
3116     j++;
3117   }
3118   assert(j == regs->length(), "missed a field?");
3119 
3120   __ ret(lr);
3121 
3122   int unpack_fields_off = __ offset();
3123 
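       // Unpack: load each field value from the buffered instance in r0 back into
       // its register; a null oop means there is nothing to load.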
3124   Label skip;
3125   __ cbz(r0, skip);
3126 
3127   j = 1;
3128   for (int i = 0; i < sig_vk->length(); i++) {
3129     BasicType bt = sig_vk->at(i)._bt;
3130     if (bt == T_METADATA) {
3131       continue;
3132     }
3133     if (bt == T_VOID) {
3134       if (sig_vk->at(i-1)._bt == T_LONG ||
3135           sig_vk->at(i-1)._bt == T_DOUBLE) {
3136         j++;
3137       }
3138       continue;
3139     }
3140     int off = sig_vk->at(i)._offset;
3141     assert(off > 0, "offset in object should be positive");
3142     VMRegPair pair = regs->at(j);
3143     VMReg r_1 = pair.first();
3144     VMReg r_2 = pair.second();
3145     Address from(r0, off);
3146     if (bt == T_FLOAT) {
3147       __ ldrs(r_1->as_FloatRegister(), from);
3148     } else if (bt == T_DOUBLE) {
3149       __ ldrd(r_1->as_FloatRegister(), from);
3150     } else if (bt == T_OBJECT || bt == T_ARRAY) {
3151       assert_different_registers(r0, r_1->as_Register());
3152       __ load_heap_oop(r_1->as_Register(), from, rscratch1, rscratch2);
3153     } else {
3154       assert(is_java_primitive(bt), "unexpected basic type");
3155       assert_different_registers(r0, r_1->as_Register());
3156 
3157       size_t size_in_bytes = type2aelembytes(bt);
3158       __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
3159     }
3160     j++;
3161   }
3162   assert(j == regs->length(), "missed a field?");
3163 
3164   __ bind(skip);
3165 
3166   __ ret(lr);
3167 
3168   __ flush();
3169 
3170   return BufferedInlineTypeBlob::create(&buffer, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
3171 }
3172 
3173 // Continuation point for throwing of implicit exceptions that are
3174 // not handled in the current activation. Fabricates an exception
3175 // oop and initiates normal exception dispatching in this
3176 // frame. Since we need to preserve callee-saved values (currently
3177 // only for C2, but done for C1 as well) we need a callee-saved oop
3178 // map and therefore have to make these stubs into RuntimeStubs
3179 // rather than BufferBlobs.  If the compiler needs all registers to
3180 // be preserved between the fault point and the exception handler
3181 // then it must assume responsibility for that in
3182 // AbstractCompiler::continuation_for_implicit_null_exception or
3183 // continuation_for_implicit_division_by_zero_exception. All other
3184 // implicit exceptions (e.g., NullPointerException or
3185 // AbstractMethodError on entry) are either at call sites or
3186 // otherwise assume that stack unwinding will be initiated, so
3187 // caller-saved registers are assumed volatile in the compiler.
3188 
3189 RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) {
3190   assert(is_throw_id(id), "expected a throw stub id");
3191 
3192   const char* name = SharedRuntime::stub_name(id);