src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "asm/macroAssembler.hpp"
  29 #include "asm/macroAssembler.inline.hpp"
  30 #include "code/codeCache.hpp"
  31 #include "code/compiledIC.hpp"
  32 #include "code/debugInfoRec.hpp"
  33 #include "code/vtableStubs.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/barrierSetAssembler.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "interpreter/interp_masm.hpp"
  38 #include "logging/log.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "nativeInst_aarch64.hpp"
  41 #include "oops/klass.inline.hpp"
  42 #include "oops/method.inline.hpp"
  43 #include "prims/methodHandles.hpp"
  44 #include "runtime/continuation.hpp"
  45 #include "runtime/continuationEntry.inline.hpp"
  46 #include "runtime/globals.hpp"
  47 #include "runtime/jniHandles.hpp"
  48 #include "runtime/safepointMechanism.hpp"
  49 #include "runtime/sharedRuntime.hpp"

 355       break;
 356     case T_DOUBLE:
 357       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 358       if (fp_args < Argument::n_float_register_parameters_j) {
 359         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 360       } else {
 361         stk_args = align_up(stk_args, 2);
 362         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 363         stk_args += 2;
 364       }
 365       break;
 366     default:
 367       ShouldNotReachHere();
 368       break;
 369     }
 370   }
 371 
 372   return stk_args;
 373 }
 374 
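For reference, the fragment above is the tail of java_calling_convention: once the eight Java float-argument registers are exhausted, each further double spills to 32-bit VMRegImpl stack slots, and stk_args is first rounded up to an even slot index so the 8-byte value stays aligned. A minimal standalone sketch of that bookkeeping (plain C++; the constants are assumptions, not part of this webrev):

// Sketch: stack-slot accounting for a 9th double after v0-v7 are used.
// VMRegImpl stack slots are 4 bytes, so a double needs two of them.
int stk_args = 0;                    // slots handed out so far
int spilled_doubles = 1;             // e.g. the 9th double in a signature
for (int i = 0; i < spilled_doubles; i++) {
  stk_args = (stk_args + 1) & ~1;    // align_up(stk_args, 2)
  // regs[i].set2(VMRegImpl::stack2reg(stk_args)) in the real code
  stk_args += 2;                     // the double occupies two 32-bit slots
}
// stk_args == 2 is what java_calling_convention would return here.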
 375 // Patch the caller's callsite with entry to compiled code if it exists.
 376 static void patch_callers_callsite(MacroAssembler *masm) {
 377   Label L;
 378   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 379   __ cbz(rscratch1, L);
 380 
 381   __ enter();
 382   __ push_CPU_state();
 383 
 384   // VM needs caller's callsite
 385   // VM needs target method
 386   // This needs to be a long call since we will relocate this adapter to
 387   // the codeBuffer and it may not reach
 388 
 389 #ifndef PRODUCT
 390   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
 391 #endif
 392 
 393   __ mov(c_rarg0, rmethod);
 394   __ mov(c_rarg1, lr);
 395   __ authenticate_return_address(c_rarg1);
 396   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 397   __ blr(rscratch1);
 398 
 399   // Explicit isb required because fixup_callers_callsite may change the code
 400   // stream.
 401   __ safepoint_isb();
 402 
 403   __ pop_CPU_state();
 404   // restore sp
 405   __ leave();
 406   __ bind(L);
 407 }
 408 
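The stub above only loads Method::code(), saves state, and calls out; the actual patching happens in SharedRuntime::fixup_callers_callsite on the VM side. A rough, deliberately hypothetical sketch of that runtime side (the helper name is illustrative only; the real JRT_LEAF uses the NativeCall/relocation machinery and guards against concurrent patching):

// Illustrative only -- not the HotSpot implementation.
static void fixup_callers_callsite_sketch(Method* moop, address ret_pc) {
  if (moop->code() == nullptr) return;      // callee has no compiled code
  // Find the call instruction that produced ret_pc and retarget it from
  // the c2i adapter to the compiled (verified) entry point, so future
  // calls skip the interpreter transition entirely.
  // retarget_call(ret_pc, moop->code()->verified_entry_point());  // hypothetical
}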
 409 static void gen_c2i_adapter(MacroAssembler *masm,
 410                             int total_args_passed,
 411                             int comp_args_on_stack,
 412                             const BasicType *sig_bt,
 413                             const VMRegPair *regs,
 414                             Label& skip_fixup) {
 415   // Before we get into the guts of the C2I adapter, see if we should be here
 416   // at all.  We've come from compiled code and are attempting to jump to the
 417   // interpreter, which means the caller made a static call to get here
 418   // (vcalls always get a compiled target if there is one).  Check for a
 419   // compiled target.  If there is one, we need to patch the caller's call.
 420   patch_callers_callsite(masm);
 421 
 422   __ bind(skip_fixup);
 423 
 424   int words_pushed = 0;
 425 
 426   // Since all args are passed on the stack, total_args_passed *
 427   // Interpreter::stackElementSize is the space we need.
 428 
 429   int extraspace = total_args_passed * Interpreter::stackElementSize;
 430 
 431   __ mov(r19_sender_sp, sp);
 432 
 433   // stack is aligned, keep it that way
 434   extraspace = align_up(extraspace, 2*wordSize);
 435 
 436   if (extraspace)
 437     __ sub(sp, sp, extraspace);
 438 
 439   // Now write the args into the outgoing interpreter space
 440   for (int i = 0; i < total_args_passed; i++) {
 441     if (sig_bt[i] == T_VOID) {
 442       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 443       continue;
 444     }
 445 
 446     // offset to start parameters
 447     int st_off   = (total_args_passed - i - 1) * Interpreter::stackElementSize;
 448     int next_off = st_off - Interpreter::stackElementSize;
 449 
 450     // Say 4 args:
 451     // i   st_off
 452     // 0   32 T_LONG
 453     // 1   24 T_VOID
 454     // 2   16 T_OBJECT
 455     // 3    8 T_BOOL
 456     // -    0 return address
 457     //
 458     // However, to make things extra confusing: because we can fit a Java long/double in
 459     // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
 460     // leaves one slot empty and only stores to a single slot. In this case the
 461     // slot that is occupied is the T_VOID slot. See, I said it was confusing.
 462 
 463     VMReg r_1 = regs[i].first();
 464     VMReg r_2 = regs[i].second();
 465     if (!r_1->is_valid()) {
 466       assert(!r_2->is_valid(), "");
 467       continue;
 468     }
 469     if (r_1->is_stack()) {
 470       // memory to memory use rscratch1
 471       int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
 472                     + extraspace
 473                     + words_pushed * wordSize);
 474       if (!r_2->is_valid()) {
 475         // sign extend??
 476         __ ldrw(rscratch1, Address(sp, ld_off));
 477         __ str(rscratch1, Address(sp, st_off));
 478 
 479       } else {
 480 
 481         __ ldr(rscratch1, Address(sp, ld_off));
 482 
 483         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 484         // T_DOUBLE and T_LONG use two slots in the interpreter
 485         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 486           // ld_off == LSW, ld_off+wordSize == MSW
 487           // st_off == MSW, next_off == LSW
 488           __ str(rscratch1, Address(sp, next_off));
 489 #ifdef ASSERT
 490           // Overwrite the unused slot with known junk
 491           __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaaaull);
 492           __ str(rscratch1, Address(sp, st_off));
 493 #endif /* ASSERT */
 494         } else {
 495           __ str(rscratch1, Address(sp, st_off));
 496         }
 497       }
 498     } else if (r_1->is_Register()) {
 499       Register r = r_1->as_Register();
 500       if (!r_2->is_valid()) {
 501         // must be only an int (or smaller) so move only 32 bits to the slot
 502         // why not sign extend??
 503         __ str(r, Address(sp, st_off));
 504       } else {
 505         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 506         // T_DOUBLE and T_LONG use two slots in the interpreter
 507         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 508           // jlong/double in gpr
 509 #ifdef ASSERT
 510           // Overwrite the unused slot with known junk
 511           __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaabull);
 512           __ str(rscratch1, Address(sp, st_off));
 513 #endif /* ASSERT */
 514           __ str(r, Address(sp, next_off));
 515         } else {
 516           __ str(r, Address(sp, st_off));
 517         }
 518       }
 519     } else {
 520       assert(r_1->is_FloatRegister(), "");
 521       if (!r_2->is_valid()) {
 522         // only a float, use just part of the slot
 523         __ strs(r_1->as_FloatRegister(), Address(sp, st_off));
 524       } else {
 525 #ifdef ASSERT
 526         // Overwrite the unused slot with known junk
 527         __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaacull);
 528         __ str(rscratch1, Address(sp, st_off));
 529 #endif /* ASSERT */
 530         __ strd(r_1->as_FloatRegister(), Address(sp, next_off));
 531       }
 532     }
 533   }
 534 
 535   __ mov(esp, sp); // Interp expects args on caller's expression stack
 536 
 537   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
 538   __ br(rscratch1);
 539 }
 540 
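To pin down the st_off/next_off arithmetic, here is a self-contained sketch (ordinary C++; the 8-byte Interpreter::stackElementSize is an assumption) of the offsets for the four-entry signature from the "Say 4 args" comment. Note the comment's table additionally reserves offset 0 for a return address, which shifts its numbers up one word relative to this code, which has no such slot:

#include <cstdio>

// Sketch: interpreter slot offsets for sig = { T_LONG, T_VOID, T_OBJECT, T_BOOLEAN }.
int main() {
  const int stackElementSize = 8;   // Interpreter::stackElementSize (assumed)
  const int total_args_passed = 4;  // includes the T_VOID half of the long
  for (int i = 0; i < total_args_passed; i++) {
    int st_off   = (total_args_passed - i - 1) * stackElementSize;
    int next_off = st_off - stackElementSize;
    printf("i=%d st_off=%2d next_off=%2d\n", i, st_off, next_off);
  }
  // st_off = 24, 16, 8, 0. For i=0 (T_LONG) the value is stored at
  // next_off == 16, which is the T_VOID slot (i=1); st_off == 24 only
  // receives the 0xdeadffffdeadaaaa junk pattern in debug builds.
  return 0;
}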
 541 
 542 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
 543                                     int total_args_passed,
 544                                     int comp_args_on_stack,
 545                                     const BasicType *sig_bt,
 546                                     const VMRegPair *regs) {
 547 
 548   // Note: r19_sender_sp contains the senderSP on entry. We must
 549   // preserve it since we may do a i2c -> c2i transition if we lose a
 550   // race where compiled code goes non-entrant while we get args
 551   // ready.
 552 
 553   // Adapters are frameless.
 554 
 555   // An i2c adapter is frameless because the *caller* frame, which is
 556   // interpreted, routinely repairs its own esp (from
 557   // interpreter_frame_last_sp), even if a callee has modified the
 558   // stack pointer.  It also recalculates and aligns sp.
 559 
 560   // A c2i adapter is frameless because the *callee* frame, which is
 561   // interpreted, routinely repairs its caller's sp (from sender_sp,
 562   // which is set up via the senderSP register).
 563 
 564   // In other words, if *either* the caller or callee is interpreted, we can
 565   // get the stack pointer repaired after a call.
 566 

 589       range_check(masm, rax, r11,
 590                   StubRoutines::initial_stubs_code()->code_begin(),
 591                   StubRoutines::initial_stubs_code()->code_end(),
 592                   L_ok);
 593     }
 594     if (StubRoutines::final_stubs_code() != nullptr) {
 595       range_check(masm, rax, r11,
 596                   StubRoutines::final_stubs_code()->code_begin(),
 597                   StubRoutines::final_stubs_code()->code_end(),
 598                   L_ok);
 599     }
 600     const char* msg = "i2c adapter must return to an interpreter frame";
 601     __ block_comment(msg);
 602     __ stop(msg);
 603     __ bind(L_ok);
 604     __ block_comment("} verify_i2ce ");
 605 #endif
 606   }
 607 
 608   // Cut-out for having no stack args.
 609   int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
 610   if (comp_args_on_stack) {
 611     __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
 612     __ andr(sp, rscratch1, -16);
 613   }
 614 
 615   // Will jump to the compiled code just as if compiled code was doing it.
 616   // Pre-load the register-jump target early, to schedule it better.
 617   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));
 618 
 619 #if INCLUDE_JVMCI
 620   if (EnableJVMCI) {
 621     // check if this call should be routed towards a specific entry point
 622     __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 623     Label no_alternative_target;
 624     __ cbz(rscratch2, no_alternative_target);
 625     __ mov(rscratch1, rscratch2);
 626     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 627     __ bind(no_alternative_target);
 628   }
 629 #endif // INCLUDE_JVMCI
 630 
 631   // Now generate the shuffle code.
 632   for (int i = 0; i < total_args_passed; i++) {
 633     if (sig_bt[i] == T_VOID) {
 634       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 635       continue;
 636     }
 637 
 638     // Pick up 0, 1 or 2 words from SP+offset.
 639 
 640     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
 641             "scrambled load targets?");
 642     // Load in argument order going down.
 643     int ld_off = (total_args_passed - i - 1)*Interpreter::stackElementSize;
 644     // Point to interpreter value (vs. tag)
 645     int next_off = ld_off - Interpreter::stackElementSize;
 646     //
 647     //
 648     //
 649     VMReg r_1 = regs[i].first();
 650     VMReg r_2 = regs[i].second();
 651     if (!r_1->is_valid()) {
 652       assert(!r_2->is_valid(), "");
 653       continue;
 654     }
 655     if (r_1->is_stack()) {
 656       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 657       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
 658       if (!r_2->is_valid()) {
 659         // sign extend???
 660         __ ldrsw(rscratch2, Address(esp, ld_off));
 661         __ str(rscratch2, Address(sp, st_off));
 662       } else {
 663         //
 664         // We are using two optoregs. This can be either T_OBJECT,
 665         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
 666         // two slots but only uses one for the T_LONG or T_DOUBLE case,
 667         // so we must adjust where to pick up the data to match the
 668         // interpreter.
 669         //
 670         // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
 671         // are accessed at negative offsets, so the LSW is at the low address
 672 
 673         // ld_off is MSW so get LSW
 674         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 675                            next_off : ld_off;
 676         __ ldr(rscratch2, Address(esp, offset));
 677         // st_off is LSW (i.e. reg.first())
 678         __ str(rscratch2, Address(sp, st_off));
 679       }
 680     } else if (r_1->is_Register()) {  // Register argument
 681       Register r = r_1->as_Register();
 682       if (r_2->is_valid()) {
 683         //
 684         // We are using two VMRegs. This can be either T_OBJECT,
 685         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
 686         // two slots but only uses one for the T_LONG or T_DOUBLE case,
 687         // so we must adjust where to pick up the data to match the
 688         // interpreter.
 689 
 690         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 691                            next_off : ld_off;
 692 
 693         // this can be a misaligned move
 694         __ ldr(r, Address(esp, offset));
 695       } else {
 696         // sign extend and use a full word?
 697         __ ldrw(r, Address(esp, ld_off));
 698       }
 699     } else {
 700       if (!r_2->is_valid()) {
 701         __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
 702       } else {
 703         __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
 704       }
 705     }
 706   }
 707 
 708   __ mov(rscratch2, rscratch1);
 709   __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
 710   __ mov(rscratch1, rscratch2);
 711 
 712   // 6243940 We might end up in handle_wrong_method if
 713   // the callee is deoptimized as we race thru here. If that
 714   // happens we don't want to take a safepoint because the
 715   // caller frame will look interpreted and arguments are now
 716   // "compiled" so it is much better to make this transition
 717   // invisible to the stack walking code. Unfortunately if
 718   // we try and find the callee by normal means a safepoint
 719   // is possible. So we stash the desired callee in the thread
 720   // and the VM will find it there should this case occur.
 721 
 722   __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
 723 
 724   __ br(rscratch1);
 725 }
 726 
 727 // ---------------------------------------------------------------
 728 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 729                                                             int total_args_passed,
 730                                                             int comp_args_on_stack,
 731                                                             const BasicType *sig_bt,
 732                                                             const VMRegPair *regs,
 733                                                             AdapterFingerPrint* fingerprint) {
 734   address i2c_entry = __ pc();
 735 
 736   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
 737 
 738   address c2i_unverified_entry = __ pc();
 739   Label skip_fixup;
 740 
 741   Register data = rscratch2;
 742   Register receiver = j_rarg0;
 743   Register tmp = r10;  // A call-clobbered register not used for arg passing
 744 
 745   // -------------------------------------------------------------------------
 746   // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
 747   // to the interpreter.  The args start out packed in the compiled layout.  They
 748   // need to be unpacked into the interpreter layout.  This will almost always
 749   // require some stack space.  We grow the current (compiled) stack, then repack
 750   // the args.  We  finally end in a jump to the generic interpreter entry point.
 751   // On exit from the interpreter, the interpreter will restore our SP (lest the
 752   // compiled code, which relies solely on SP and not FP, get sick).
 753 
 754   {
 755     __ block_comment("c2i_unverified_entry {");
 756     // Method might have been compiled since the call site was patched to
 757     // interpreted; if that is the case treat it as a miss so we can get
 758     // the call site corrected.
 759     __ ic_check(1 /* end_alignment */);
 760     __ ldr(rmethod, Address(data, CompiledICData::speculated_method_offset()));
 761 
 762     __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 763     __ cbz(rscratch1, skip_fixup);
 764     __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 765     __ block_comment("} c2i_unverified_entry");
 766   }
 767 
 768   address c2i_entry = __ pc();
 769 
 770   // Class initialization barrier for static methods
 771   address c2i_no_clinit_check_entry = nullptr;
 772   if (VM_Version::supports_fast_class_init_checks()) {
 773     Label L_skip_barrier;
 774 
 775     { // Bypass the barrier for non-static methods
 776       __ ldrw(rscratch1, Address(rmethod, Method::access_flags_offset()));
 777       __ andsw(zr, rscratch1, JVM_ACC_STATIC);
 778       __ br(Assembler::EQ, L_skip_barrier); // non-static
 779     }
 780 
 781     __ load_method_holder(rscratch2, rmethod);
 782     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
 783     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 784 
 785     __ bind(L_skip_barrier);
 786     c2i_no_clinit_check_entry = __ pc();
 787   }
 788 
 789   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 790   bs->c2i_entry_barrier(masm);
 791 
 792   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 793 
 794   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
 795 }
 796 
 797 static int c_calling_convention_priv(const BasicType *sig_bt,
 798                                          VMRegPair *regs,
 799                                          int total_args_passed) {
 800 
 801 // We return the number of VMRegImpl stack slots we need to reserve for all
 802 // the arguments NOT counting out_preserve_stack_slots.
 803 
 804     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
 805       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5,  c_rarg6,  c_rarg7
 806     };
 807     static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
 808       c_farg0, c_farg1, c_farg2, c_farg3,
 809       c_farg4, c_farg5, c_farg6, c_farg7
 810     };
 811 
 812     uint int_args = 0;
 813     uint fp_args = 0;
 814     uint stk_args = 0; // inc by 2 each time

1752   if (method->is_synchronized()) {
1753     Label count;
1754     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1755 
1756     // Get the handle (the 2nd argument)
1757     __ mov(oop_handle_reg, c_rarg1);
1758 
1759     // Get address of the box
1760 
1761     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1762 
1763     // Load the oop from the handle
1764     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1765 
1766     if (LockingMode == LM_MONITOR) {
1767       __ b(slow_path_lock);
1768     } else if (LockingMode == LM_LEGACY) {
1769       // Load (object->mark() | 1) into swap_reg %r0
1770       __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1771       __ orr(swap_reg, rscratch1, 1);
1772 
1773       // Save (object->mark() | 1) into BasicLock's displaced header
1774       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1775 
1776       // src -> dest iff dest == r0 else r0 <- dest
1777       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
1778 
1779       // Hmm should this move to the slow path code area???
1780 
1781       // Test if the oopMark is an obvious stack pointer, i.e.,
1782       //  1) (mark & 3) == 0, and
1783       //  2) sp <= mark < mark + os::pagesize()
1784       // These 3 tests can be done by evaluating the following
1785       // expression: ((mark - sp) & (3 - os::vm_page_size())),
1786       // assuming both stack pointer and pagesize have their
1787       // least significant 2 bits clear.
1788       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1789 
1790       __ sub(swap_reg, sp, swap_reg);
1791       __ neg(swap_reg, swap_reg);

3074   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
3075 #endif
3076   // Clear the exception oop so GC no longer processes it as a root.
3077   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
3078 
3079   // r0: exception oop
3080   // r8:  exception handler
3081   // r4: exception pc
3082   // Jump to handler
3083 
3084   __ br(r8);
3085 
3086   // Make sure all code is generated
3087   masm->flush();
3088 
3089   // Set exception blob
3090   _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3091 }
3092 
3093 #endif // COMPILER2

  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "asm/macroAssembler.hpp"
  29 #include "asm/macroAssembler.inline.hpp"
  30 #include "classfile/symbolTable.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "code/compiledIC.hpp"
  33 #include "code/debugInfoRec.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/oopMap.hpp"
  36 #include "gc/shared/barrierSetAssembler.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "interpreter/interp_masm.hpp"
  39 #include "logging/log.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "nativeInst_aarch64.hpp"
  42 #include "oops/klass.inline.hpp"
  43 #include "oops/method.inline.hpp"
  44 #include "prims/methodHandles.hpp"
  45 #include "runtime/continuation.hpp"
  46 #include "runtime/continuationEntry.inline.hpp"
  47 #include "runtime/globals.hpp"
  48 #include "runtime/jniHandles.hpp"
  49 #include "runtime/safepointMechanism.hpp"
  50 #include "runtime/sharedRuntime.hpp"

 356       break;
 357     case T_DOUBLE:
 358       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 359       if (fp_args < Argument::n_float_register_parameters_j) {
 360         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 361       } else {
 362         stk_args = align_up(stk_args, 2);
 363         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 364         stk_args += 2;
 365       }
 366       break;
 367     default:
 368       ShouldNotReachHere();
 369       break;
 370     }
 371   }
 372 
 373   return stk_args;
 374 }
 375 
 376 
 377 const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j;
 378 const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
 379 
 380 int SharedRuntime::java_return_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) {
 381 
 382   // Create the mapping between argument positions and registers.
 383 
 384   static const Register INT_ArgReg[java_return_convention_max_int] = {
 385     r0 /* j_rarg7 */, j_rarg6, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0
 386   };
 387 
 388   static const FloatRegister FP_ArgReg[java_return_convention_max_float] = {
 389     j_farg0, j_farg1, j_farg2, j_farg3, j_farg4, j_farg5, j_farg6, j_farg7
 390   };
 391 
 392   uint int_args = 0;
 393   uint fp_args = 0;
 394 
 395   for (int i = 0; i < total_args_passed; i++) {
 396     switch (sig_bt[i]) {
 397     case T_BOOLEAN:
 398     case T_CHAR:
 399     case T_BYTE:
 400     case T_SHORT:
 401     case T_INT:
 402       if (int_args < SharedRuntime::java_return_convention_max_int) {
 403         regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
 404         int_args ++;
 405       } else {
 406         return -1;
 407       }
 408       break;
 409     case T_VOID:
 410       // halves of T_LONG or T_DOUBLE
 411       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 412       regs[i].set_bad();
 413       break;
 414     case T_LONG:
 415       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 416       // fall through
 417     case T_OBJECT:
 418     case T_ARRAY:
 419     case T_ADDRESS:
 420     // Should T_METADATA be added to java_calling_convention as well?
 421     case T_METADATA:
 422       if (int_args < SharedRuntime::java_return_convention_max_int) {
 423         regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
 424         int_args ++;
 425       } else {
 426         return -1;
 427       }
 428       break;
 429     case T_FLOAT:
 430       if (fp_args < SharedRuntime::java_return_convention_max_float) {
 431         regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
 432         fp_args ++;
 433       } else {
 434         return -1;
 435       }
 436       break;
 437     case T_DOUBLE:
 438       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 439       if (fp_args < SharedRuntime::java_return_convention_max_float) {
 440         regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
 441         fp_args ++;
 442       } else {
 443         return -1;
 444       }
 445       break;
 446     default:
 447       ShouldNotReachHere();
 448       break;
 449     }
 450   }
 451 
 452   return int_args + fp_args;
 453 }
 454 
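A short usage sketch of the new java_return_convention; the driver code below is hypothetical, only the function itself is part of this change. For a scalarized return with an int field and a long field:

// Sketch: querying the return convention for fields { T_INT, T_LONG }.
BasicType sig_bt[] = { T_INT, T_LONG, T_VOID };  // T_VOID = second long half
VMRegPair regs[3];
int n = SharedRuntime::java_return_convention(sig_bt, regs, 3);
// n == 2: the int is returned in r0 (j_rarg7), the long in j_rarg6, and
// regs[2] is set_bad() for the T_VOID half. Once either register class
// is exhausted, the function returns -1 (no register-based convention).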
 455 // Patch the caller's callsite with entry to compiled code if it exists.
 456 static void patch_callers_callsite(MacroAssembler *masm) {
 457   Label L;
 458   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 459   __ cbz(rscratch1, L);
 460 
 461   __ enter();
 462   __ push_CPU_state();
 463 
 464   // VM needs caller's callsite
 465   // VM needs target method
 466   // This needs to be a long call since we will relocate this adapter to
 467   // the codeBuffer and it may not reach
 468 
 469 #ifndef PRODUCT
 470   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
 471 #endif
 472 
 473   __ mov(c_rarg0, rmethod);
 474   __ mov(c_rarg1, lr);
 475   __ authenticate_return_address(c_rarg1);
 476   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 477   __ blr(rscratch1);
 478 
 479   // Explicit isb required because fixup_callers_callsite may change the code
 480   // stream.
 481   __ safepoint_isb();
 482 
 483   __ pop_CPU_state();
 484   // restore sp
 485   __ leave();
 486   __ bind(L);
 487 }
 488 
 489 // For each inline type argument, sig includes the list of fields of
 490 // the inline type. This utility function computes the number of
 491 // arguments for the call if inline types are passed by reference (the
 492 // calling convention the interpreter expects).
 493 static int compute_total_args_passed_int(const GrowableArray<SigEntry>* sig_extended) {
 494   int total_args_passed = 0;
 495   if (InlineTypePassFieldsAsArgs) {
 496      for (int i = 0; i < sig_extended->length(); i++) {
 497        BasicType bt = sig_extended->at(i)._bt;
 498        if (bt == T_METADATA) {
 499          // In sig_extended, an inline type argument starts with:
 500          // T_METADATA, followed by the types of the fields of the
 501          // inline type and T_VOID to mark the end of the inline
 502          // type. Inline types are flattened so, for instance, in the
 503          // case of an inline type with an int field and an inline type
 504          // field that itself has 2 fields, an int and a long:
 505          // T_METADATA T_INT T_METADATA T_INT T_LONG T_VOID (second
 506          // slot for the T_LONG) T_VOID (inner inline type) T_VOID
 507          // (outer inline type)
 508          total_args_passed++;
 509          int vt = 1;
 510          do {
 511            i++;
 512            BasicType bt = sig_extended->at(i)._bt;
 513            BasicType prev_bt = sig_extended->at(i-1)._bt;
 514            if (bt == T_METADATA) {
 515              vt++;
 516            } else if (bt == T_VOID &&
 517                       prev_bt != T_LONG &&
 518                       prev_bt != T_DOUBLE) {
 519              vt--;
 520            }
 521          } while (vt != 0);
 522        } else {
 523          total_args_passed++;
 524        }
 525      }
 526   } else {
 527     total_args_passed = sig_extended->length();
 528   }
 529 
 530   return total_args_passed;
 531 }
 532 
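The nesting rule above is easiest to follow on the example from the comment. A walkthrough of the vt counter for an inline type with an int field plus a nested inline type holding an int and a long:

// sig_extended for the comment's example (indices added for the trace):
//   0:T_METADATA 1:T_INT 2:T_METADATA 3:T_INT 4:T_LONG 5:T_VOID 6:T_VOID 7:T_VOID
// Trace of the do/while in compute_total_args_passed_int (vt starts at 1):
//   i=1 T_INT      field of outer type              vt=1
//   i=2 T_METADATA nested inline type opens         vt=2
//   i=3 T_INT      field of inner type              vt=2
//   i=4 T_LONG     field of inner type              vt=2
//   i=5 T_VOID     prev is T_LONG: just its half    vt=2
//   i=6 T_VOID     closes the inner inline type     vt=1
//   i=7 T_VOID     closes the outer inline type     vt=0 -> loop exits
// The whole group counts as a single interpreter argument (by reference).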
 533 
 534 static void gen_c2i_adapter_helper(MacroAssembler* masm,
 535                                    BasicType bt,
 536                                    BasicType prev_bt,
 537                                    size_t size_in_bytes,
 538                                    const VMRegPair& reg_pair,
 539                                    const Address& to,
 540                                    Register tmp1,
 541                                    Register tmp2,
 542                                    Register tmp3,
 543                                    int extraspace,
 544                                    bool is_oop) {
 545   if (bt == T_VOID) {
 546     assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
 547     return;
 548   }
 549 
 550   // Say 4 args:
 551   // i   st_off
 552   // 0   32 T_LONG
 553   // 1   24 T_VOID
 554   // 2   16 T_OBJECT
 555   // 3    8 T_BOOL
 556   // -    0 return address
 557   //
 558   // However, to make things extra confusing: because we can fit a Java long/double in
 559   // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
 560   // leaves one slot empty and only stores to a single slot. In this case the
 561   // slot that is occupied is the T_VOID slot. See, I said it was confusing.
 562 
 563   bool wide = (size_in_bytes == wordSize);
 564   VMReg r_1 = reg_pair.first();
 565   VMReg r_2 = reg_pair.second();
 566   assert(r_2->is_valid() == wide, "invalid size");
 567   if (!r_1->is_valid()) {
 568     assert(!r_2->is_valid(), "");
 569     return;
 570   }
 571 
 572   if (!r_1->is_FloatRegister()) {
 573     Register val = r25;
 574     if (r_1->is_stack()) {
 575       // memory to memory use r25 (the scratch registers are used by store_heap_oop)
 576       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 577       __ load_sized_value(val, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
 578     } else {
 579       val = r_1->as_Register();
 580     }
 581     assert_different_registers(to.base(), val, tmp1, tmp2, tmp3);
 582     if (is_oop) {
 583       __ store_heap_oop(to, val, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
 584     } else {
 585       __ store_sized_value(to, val, size_in_bytes);
 586     }
 587   } else {
 588     if (wide) {
 589       __ strd(r_1->as_FloatRegister(), to);
 590     } else {
 591       // only a float, use just part of the slot
 592       __ strs(r_1->as_FloatRegister(), to);
 593     }
 594   }
 595 }
 596 
 597 static void gen_c2i_adapter(MacroAssembler *masm,
 598                             const GrowableArray<SigEntry>* sig_extended,
 599                             const VMRegPair *regs,
 600                             bool requires_clinit_barrier,
 601                             address& c2i_no_clinit_check_entry,
 602                             Label& skip_fixup,
 603                             address start,
 604                             OopMapSet* oop_maps,
 605                             int& frame_complete,
 606                             int& frame_size_in_words,
 607                             bool alloc_inline_receiver) {
 608   if (requires_clinit_barrier && VM_Version::supports_fast_class_init_checks()) {
 609     Label L_skip_barrier;
 610 
 611     { // Bypass the barrier for non-static methods
 612       __ ldrw(rscratch1, Address(rmethod, Method::access_flags_offset()));
 613       __ andsw(zr, rscratch1, JVM_ACC_STATIC);
 614       __ br(Assembler::EQ, L_skip_barrier); // non-static
 615     }
 616 
 617     __ load_method_holder(rscratch2, rmethod);
 618     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
 619     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 620 
 621     __ bind(L_skip_barrier);
 622     c2i_no_clinit_check_entry = __ pc();
 623   }
 624 
 625   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 626   bs->c2i_entry_barrier(masm);
 627 
 628   // Before we get into the guts of the C2I adapter, see if we should be here
 629   // at all.  We've come from compiled code and are attempting to jump to the
 630   // interpreter, which means the caller made a static call to get here
 631   // (vcalls always get a compiled target if there is one).  Check for a
 632   // compiled target.  If there is one, we need to patch the caller's call.
 633   patch_callers_callsite(masm);
 634 
 635   __ bind(skip_fixup);
 636 
 637   // Name some registers to be used in the following code. We can use
 638   // anything except r0-r7 which are arguments in the Java calling
 639   // convention, rmethod (r12), and r13 which holds the outgoing sender
 640   // SP for the interpreter.
 641   Register buf_array = r10;   // Array of buffered inline types
 642   Register buf_oop = r11;     // Buffered inline type oop
 643   Register tmp1 = r15;
 644   Register tmp2 = r16;
 645   Register tmp3 = r17;
 646 
 647   if (InlineTypePassFieldsAsArgs) {
 648     // Is there an inline type argument?
 649     bool has_inline_argument = false;
 650     for (int i = 0; i < sig_extended->length() && !has_inline_argument; i++) {
 651       has_inline_argument = (sig_extended->at(i)._bt == T_METADATA);
 652     }
 653     if (has_inline_argument) {
 654       // There is at least an inline type argument: we're coming from
 655       // compiled code so we have no buffers to back the inline types
 656       // Allocate the buffers here with a runtime call.
 657       RegisterSaver reg_save(false /* save_vectors */);
 658       OopMap* map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
 659 
 660       frame_complete = __ offset();
 661       address the_pc = __ pc();
 662 
 663       Label retaddr;
 664       __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
 665 
 666       __ mov(c_rarg0, rthread);
 667       __ mov(c_rarg1, rmethod);
 668       __ mov(c_rarg2, (int64_t)alloc_inline_receiver);
 669 
 670       __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_inline_types)));
 671       __ blr(rscratch1);
 672       __ bind(retaddr);
 673 
 674       oop_maps->add_gc_map(__ pc() - start, map);
 675       __ reset_last_Java_frame(false);
 676 
 677       reg_save.restore_live_registers(masm);
 678 
 679       Label no_exception;
 680       __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
 681       __ cbz(rscratch1, no_exception);
 682 
 683       __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
 684       __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
 685       __ b(RuntimeAddress(StubRoutines::forward_exception_entry()));
 686 
 687       __ bind(no_exception);
 688 
 689       // We get an array of objects from the runtime call
 690       __ get_vm_result(buf_array, rthread);
 691       __ get_vm_result_2(rmethod, rthread); // TODO: required to keep the callee Method live?
 692     }
 693   }
 694 
 695   // Since all args are passed on the stack, total_args_passed *
 696   // Interpreter::stackElementSize is the space we need.
 697 
 698   int total_args_passed = compute_total_args_passed_int(sig_extended);
 699   int extraspace = total_args_passed * Interpreter::stackElementSize;
 700 
 701   // stack is aligned, keep it that way
 702   extraspace = align_up(extraspace, StackAlignmentInBytes);
 703 
 704   // set senderSP value
 705   __ mov(r19_sender_sp, sp);
 706 
 707   __ sub(sp, sp, extraspace);
 708 
 709   // Now write the args into the outgoing interpreter space
 710 
 711   // next_arg_comp is the next argument from the compiler point of
 712   // view (inline type fields are passed in registers/on the stack). In
 713   // sig_extended, an inline type argument starts with: T_METADATA,
 714   // followed by the types of the fields of the inline type and T_VOID
 715   // to mark the end of the inline type. ignored counts the number of
 716   // T_METADATA/T_VOID. next_vt_arg is the next inline type argument:
 717   // used to get the buffer for that argument from the pool of buffers
 718   // we allocated above and want to pass to the
 719   // interpreter. next_arg_int is the next argument from the
 720   // interpreter point of view (inline types are passed by reference).
 721   for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
 722        next_arg_comp < sig_extended->length(); next_arg_comp++) {
 723     assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
 724     assert(next_arg_int <= total_args_passed, "more arguments for the interpreter than expected?");
 725     BasicType bt = sig_extended->at(next_arg_comp)._bt;
 726     int st_off = (total_args_passed - next_arg_int - 1) * Interpreter::stackElementSize;
 727     if (!InlineTypePassFieldsAsArgs || bt != T_METADATA) {
 728       int next_off = st_off - Interpreter::stackElementSize;
 729       const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
 730       const VMRegPair reg_pair = regs[next_arg_comp-ignored];
 731       size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
 732       gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
 733                              size_in_bytes, reg_pair, Address(sp, offset), tmp1, tmp2, tmp3, extraspace, false);
 734       next_arg_int++;
 735 #ifdef ASSERT
 736       if (bt == T_LONG || bt == T_DOUBLE) {
 737         // Overwrite the unused slot with known junk
 738         __ mov(rscratch1, CONST64(0xdeadffffdeadaaaa));
 739         __ str(rscratch1, Address(sp, st_off));
 740       }
 741 #endif /* ASSERT */
 742     } else {
 743       ignored++;
 744       // get the buffer from the just allocated pool of buffers
 745       int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_OBJECT);
 746       __ load_heap_oop(buf_oop, Address(buf_array, index), tmp1, tmp2);
 747       next_vt_arg++; next_arg_int++;
 748       int vt = 1;
 749       // write fields we get from compiled code in registers/stack
 750       // slots to the buffer: we know we are done with that inline type
 751       // argument when we hit the T_VOID that acts as an end of inline
 752       // type delimiter for this inline type. Inline types are flattened
 753       // so we might encounter embedded inline types. Each entry in
 754       // sig_extended contains a field offset in the buffer.
 755       Label L_null;
 756       do {
 757         next_arg_comp++;
 758         BasicType bt = sig_extended->at(next_arg_comp)._bt;
 759         BasicType prev_bt = sig_extended->at(next_arg_comp - 1)._bt;
 760         if (bt == T_METADATA) {
 761           vt++;
 762           ignored++;
 763         } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
 764           vt--;
 765           ignored++;
 766         } else {
 767           int off = sig_extended->at(next_arg_comp)._offset;
 768           if (off == -1) {
 769             // Nullable inline type argument, emit null check
 770             VMReg reg = regs[next_arg_comp-ignored].first();
 771             Label L_notNull;
 772             if (reg->is_stack()) {
 773               int ld_off = reg->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 774               __ ldrb(tmp1, Address(sp, ld_off));
 775               __ cbnz(tmp1, L_notNull);
 776             } else {
 777               __ cbnz(reg->as_Register(), L_notNull);
 778             }
 779             __ str(zr, Address(sp, st_off));
 780             __ b(L_null);
 781             __ bind(L_notNull);
 782             continue;
 783           }
 784           assert(off > 0, "offset in object should be positive");
 785           size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 786           bool is_oop = is_reference_type(bt);
 787           gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
 788                                  size_in_bytes, regs[next_arg_comp-ignored], Address(buf_oop, off), tmp1, tmp2, tmp3, extraspace, is_oop);
 789         }
 790       } while (vt != 0);
 791       // pass the buffer to the interpreter
 792       __ str(buf_oop, Address(sp, st_off));
 793       __ bind(L_null);
 794     }
 795   }
 796 
 797   __ mov(esp, sp); // Interp expects args on caller's expression stack
 798 
 799   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
 800   __ br(rscratch1);
 801 }
 802 
 803 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, int comp_args_on_stack, const GrowableArray<SigEntry>* sig, const VMRegPair *regs) {
 804 
 805 
 806   // Note: r19_sender_sp contains the senderSP on entry. We must
 807   // preserve it since we may do a i2c -> c2i transition if we lose a
 808   // race where compiled code goes non-entrant while we get args
 809   // ready.
 810 
 811   // Adapters are frameless.
 812 
 813   // An i2c adapter is frameless because the *caller* frame, which is
 814   // interpreted, routinely repairs its own esp (from
 815   // interpreter_frame_last_sp), even if a callee has modified the
 816   // stack pointer.  It also recalculates and aligns sp.
 817 
 818   // A c2i adapter is frameless because the *callee* frame, which is
 819   // interpreted, routinely repairs its caller's sp (from sender_sp,
 820   // which is set up via the senderSP register).
 821 
 822   // In other words, if *either* the caller or callee is interpreted, we can
 823   // get the stack pointer repaired after a call.
 824 

 847       range_check(masm, rax, r11,
 848                   StubRoutines::initial_stubs_code()->code_begin(),
 849                   StubRoutines::initial_stubs_code()->code_end(),
 850                   L_ok);
 851     }
 852     if (StubRoutines::final_stubs_code() != nullptr) {
 853       range_check(masm, rax, r11,
 854                   StubRoutines::final_stubs_code()->code_begin(),
 855                   StubRoutines::final_stubs_code()->code_end(),
 856                   L_ok);
 857     }
 858     const char* msg = "i2c adapter must return to an interpreter frame";
 859     __ block_comment(msg);
 860     __ stop(msg);
 861     __ bind(L_ok);
 862     __ block_comment("} verify_i2ce ");
 863 #endif
 864   }
 865 
 866   // Cut-out for having no stack args.
 867   int comp_words_on_stack = 0;
 868   if (comp_args_on_stack) {
 869      comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord;
 870      __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
 871      __ andr(sp, rscratch1, -16);
 872   }
 873 
 874   // Will jump to the compiled code just as if compiled code was doing it.
 875   // Pre-load the register-jump target early, to schedule it better.
 876   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_inline_offset())));
 877 
 878 #if INCLUDE_JVMCI
 879   if (EnableJVMCI) {
 880     // check if this call should be routed towards a specific entry point
 881     __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 882     Label no_alternative_target;
 883     __ cbz(rscratch2, no_alternative_target);
 884     __ mov(rscratch1, rscratch2);
 885     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 886     __ bind(no_alternative_target);
 887   }
 888 #endif // INCLUDE_JVMCI
 889 
 890   int total_args_passed = sig->length();
 891 
 892   // Now generate the shuffle code.
 893   for (int i = 0; i < total_args_passed; i++) {
 894     BasicType bt = sig->at(i)._bt;
 895     if (bt == T_VOID) {
 896       assert(i > 0 && (sig->at(i - 1)._bt == T_LONG || sig->at(i - 1)._bt == T_DOUBLE), "missing half");
 897       continue;
 898     }
 899 
 900     // Pick up 0, 1 or 2 words from SP+offset.
 901     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?");
 902 
 903     // Load in argument order going down.
 904     int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
 905     // Point to interpreter value (vs. tag)
 906     int next_off = ld_off - Interpreter::stackElementSize;
 907     //
 908     //
 909     //
 910     VMReg r_1 = regs[i].first();
 911     VMReg r_2 = regs[i].second();
 912     if (!r_1->is_valid()) {
 913       assert(!r_2->is_valid(), "");
 914       continue;
 915     }
 916     if (r_1->is_stack()) {
 917       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 918       int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size;
 919       if (!r_2->is_valid()) {
 920         // sign extend???
 921         __ ldrsw(rscratch2, Address(esp, ld_off));
 922         __ str(rscratch2, Address(sp, st_off));
 923       } else {
 924         //
 925         // We are using two optoregs. This can be either T_OBJECT,
 926         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
 927         // two slots but only uses one for the T_LONG or T_DOUBLE case,
 928         // so we must adjust where to pick up the data to match the
 929         // interpreter.
 930         //
 931         // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
 932         // are accessed at negative offsets, so the LSW is at the low address
 933 
 934         // ld_off is MSW so get LSW
 935         const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
 936         __ ldr(rscratch2, Address(esp, offset));
 937         // st_off is LSW (i.e. reg.first())
 938         __ str(rscratch2, Address(sp, st_off));
 939       }
 940     } else if (r_1->is_Register()) {  // Register argument
 941       Register r = r_1->as_Register();
 942       if (r_2->is_valid()) {
 943         //
 944         // We are using two VMRegs. This can be either T_OBJECT,
 945         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
 946         // two slots but only uses one for the T_LONG or T_DOUBLE case,
 947         // so we must adjust where to pick up the data to match the
 948         // interpreter.
 949 
 950         const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
 951 
 952         // this can be a misaligned move
 953         __ ldr(r, Address(esp, offset));
 954       } else {
 955         // sign extend and use a full word?
 956         __ ldrw(r, Address(esp, ld_off));
 957       }
 958     } else {
 959       if (!r_2->is_valid()) {
 960         __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
 961       } else {
 962         __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
 963       }
 964     }
 965   }
 966 
 967 
 968   __ mov(rscratch2, rscratch1);
 969   __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
 970   __ mov(rscratch1, rscratch2);
 971 
 972   // 6243940 We might end up in handle_wrong_method if
 973   // the callee is deoptimized as we race thru here. If that
 974   // happens we don't want to take a safepoint because the
 975   // caller frame will look interpreted and arguments are now
 976   // "compiled" so it is much better to make this transition
 977   // invisible to the stack walking code. Unfortunately if
 978   // we try and find the callee by normal means a safepoint
 979   // is possible. So we stash the desired callee in the thread
 980   // and the vm will find there should this case occur.
 981 
 982   __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
 983   __ br(rscratch1);
 984 }
 985 
 986 static void gen_inline_cache_check(MacroAssembler *masm, Label& skip_fixup) {
 987   Register data = rscratch2;
 988   __ ic_check(1 /* end_alignment */);
 989   __ ldr(rmethod, Address(data, CompiledICData::speculated_method_offset()));
 990 
 991   // Method might have been compiled since the call site was patched to
 992   // interpreted; if that is the case treat it as a miss so we can get
 993   // the call site corrected.
 994   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 995   __ cbz(rscratch1, skip_fixup);
 996   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 997 }
 998 
 999 // ---------------------------------------------------------------
1000 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,
1001                                                             int comp_args_on_stack,
1002                                                             const GrowableArray<SigEntry>* sig,
1003                                                             const VMRegPair* regs,
1004                                                             const GrowableArray<SigEntry>* sig_cc,
1005                                                             const VMRegPair* regs_cc,
1006                                                             const GrowableArray<SigEntry>* sig_cc_ro,
1007                                                             const VMRegPair* regs_cc_ro,
1008                                                             AdapterFingerPrint* fingerprint,
1009                                                             AdapterBlob*& new_adapter,
1010                                                             bool allocate_code_blob) {
1011 
1012   address i2c_entry = __ pc();
1013   gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);
1014 
1015   // -------------------------------------------------------------------------
1016   // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
1017   // to the interpreter.  The args start out packed in the compiled layout.  They
1018   // need to be unpacked into the interpreter layout.  This will almost always
1019   // require some stack space.  We grow the current (compiled) stack, then repack
1020   // the args.  We  finally end in a jump to the generic interpreter entry point.
1021   // On exit from the interpreter, the interpreter will restore our SP (lest the
1022   // compiled code, which relies solely on SP and not FP, get sick).
1023 
1024   address c2i_unverified_entry        = __ pc();
1025   address c2i_unverified_inline_entry = __ pc();
1026   Label skip_fixup;
1027 
1028   gen_inline_cache_check(masm, skip_fixup);
1029 
1030   OopMapSet* oop_maps = new OopMapSet();
1031   int frame_complete = CodeOffsets::frame_never_safe;
1032   int frame_size_in_words = 0;
1033 
1034   // Scalarized c2i adapter with non-scalarized receiver (i.e., don't pack receiver)
1035   address c2i_no_clinit_check_entry = nullptr;
1036   address c2i_inline_ro_entry = __ pc();
1037   if (regs_cc != regs_cc_ro) {
1038     // No class init barrier needed because method is guaranteed to be non-static
1039     gen_c2i_adapter(masm, sig_cc_ro, regs_cc_ro, /* requires_clinit_barrier = */ false, c2i_no_clinit_check_entry,
1040                     skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1041     skip_fixup.reset();
1042   }
1043 
1044   // Scalarized c2i adapter
1045   address c2i_entry        = __ pc();
1046   address c2i_inline_entry = __ pc();
1047   gen_c2i_adapter(masm, sig_cc, regs_cc, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
1048                   skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ true);
1049 
1050   // Non-scalarized c2i adapter
1051   if (regs != regs_cc) {
1052     c2i_unverified_inline_entry = __ pc();
1053     Label inline_entry_skip_fixup;
1054     gen_inline_cache_check(masm, inline_entry_skip_fixup);
1055 
1056     c2i_inline_entry = __ pc();
1057     gen_c2i_adapter(masm, sig, regs, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
1058                     inline_entry_skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1059   }
1060 
1061 
1062   // The c2i adapter might safepoint and trigger a GC. The caller must make sure that
1063   // the GC knows about the location of oop argument locations passed to the c2i adapter.
1064   if (allocate_code_blob) {
1065     bool caller_must_gc_arguments = (regs != regs_cc);
1066     new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps, caller_must_gc_arguments);
1067   }
1068 
1069   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry, c2i_unverified_inline_entry, c2i_no_clinit_check_entry);
1070 }
1071 
1072 static int c_calling_convention_priv(const BasicType *sig_bt,
1073                                          VMRegPair *regs,
1074                                          int total_args_passed) {
1075 
 1076 // We return the number of VMRegImpl stack slots we need to reserve for all
1077 // the arguments NOT counting out_preserve_stack_slots.
1078 
1079     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1080       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5,  c_rarg6,  c_rarg7
1081     };
1082     static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1083       c_farg0, c_farg1, c_farg2, c_farg3,
1084       c_farg4, c_farg5, c_farg6, c_farg7
1085     };
1086 
1087     uint int_args = 0;
1088     uint fp_args = 0;
1089     uint stk_args = 0; // inc by 2 each time
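    // For example (a sketch of the expected AAPCS64 mapping, before any of
    // the special cases handled below): a signature (jint, jdouble, jlong)
    // maps to c_rarg0, c_farg0, c_rarg1 with stk_args left at 0, while a
    // ninth integer argument would spill to the stack and bump stk_args by 2
    // (one 8-byte stack slot == two VMRegImpl slots).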

2027   if (method->is_synchronized()) {
2028     Label count;
2029     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2030 
2031     // Get the handle (the 2nd argument)
2032     __ mov(oop_handle_reg, c_rarg1);
2033 
2034     // Get address of the box
2035 
2036     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2037 
2038     // Load the oop from the handle
2039     __ ldr(obj_reg, Address(oop_handle_reg, 0));
2040 
2041     if (LockingMode == LM_MONITOR) {
2042       __ b(slow_path_lock);
2043     } else if (LockingMode == LM_LEGACY) {
2044       // Load (object->mark() | 1) into swap_reg %r0
2045       __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2046       __ orr(swap_reg, rscratch1, 1);
2047       if (EnableValhalla) {
2048         // Mask inline_type bit such that we go to the slow path if object is an inline type
2049         __ andr(swap_reg, swap_reg, ~((int) markWord::inline_type_bit_in_place));
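        // With the bit cleared here, the CAS below can never see a matching
        // mark for an inline type (whose mark word has the bit set), so such
        // objects reliably take the slow path.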
2050       }
2051 
2052       // Save (object->mark() | 1) into BasicLock's displaced header
2053       __ str(swap_reg, Address(lock_reg, mark_word_offset));
2054 
2055       // lock_reg -> obj->mark() iff obj->mark() == r0 (swap_reg), else r0 <- obj->mark()
2056       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
2057 
2058       // Hmm: should this move to the slow-path code area?
2059 
2060       // Test if the oopMark is an obvious stack pointer, i.e.,
2061       //  1) (mark & 3) == 0, and
2062       //  2) sp <= mark < mark + os::vm_page_size()
2063       // These 3 tests (condition 2 being two comparisons) can be done by
2064       // evaluating the expression: ((mark - sp) & (3 - os::vm_page_size())),
2065       // assuming both the stack pointer and the page size have their
2066       // least significant 2 bits clear.
2067       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
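      //
      // A worked example (illustrative numbers only, assuming a 4K page so
      // the mask 3 - 0x1000 is ...fffff003, and sp == 0x1000):
      //   mark == 0x1008 (aligned, on this stack page):
      //     (0x1008 - 0x1000) & ...f003 == 0x8 & ...f003 == 0    -> ours
      //   mark == 0x9000 (aligned, but some other address):
      //     0x8000 & ...f003 == 0x8000 != 0                      -> not ours
      //   mark == 0x1009 (low bits set, not a stack pointer at all):
      //     0x9 & ...f003 == 0x1 != 0                            -> not ours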
2068 
2069       __ sub(swap_reg, sp, swap_reg);
2070       __ neg(swap_reg, swap_reg);
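      // swap_reg now holds mark - sp, computed as -(sp - mark); presumably
      // done in two steps because sp is not a valid second source operand
      // for sub here.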

3353   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
3354 #endif
3355   // Clear the exception oop so GC no longer processes it as a root.
3356   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
3357 
3358   // r0: exception oop
3359   // r8: exception handler
3360   // r4: exception pc
3361   // Jump to handler
3362 
3363   __ br(r8);
3364 
3365   // Make sure all code is generated
3366   masm->flush();
3367 
3368   // Set exception blob
3369   _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3370 }
3371 
3372 #endif // COMPILER2
3373 
3374 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
3375   BufferBlob* buf = BufferBlob::create("inline types pack/unpack", 16 * K);
3376   CodeBuffer buffer(buf);
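  // Use a small on-stack scratch area for relocation records; the handful of
  // relocations this blob emits is assumed to fit in the 20 shorts below.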
3377   short buffer_locs[20];
3378   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3379                                          sizeof(buffer_locs)/sizeof(relocInfo));
3380 
3381   MacroAssembler _masm(&buffer);
3382   MacroAssembler* masm = &_masm;
3383 
3384   const Array<SigEntry>* sig_vk = vk->extended_sig();
3385   const Array<VMRegPair>* regs = vk->return_regs();
3386 
3387   int pack_fields_jobject_off = __ offset();
3388   // Resolve pre-allocated buffer from JNI handle.
3389   // We cannot do this in generate_call_stub() because it requires GC code to be initialized.
3390   Register Rresult = r14;  // See StubGenerator::generate_call_stub().
3391   __ ldr(r0, Address(Rresult));
3392   __ resolve_jobject(r0 /* value */,
3393                      rthread /* thread */,
3394                      r12 /* tmp */);
3395   __ str(r0, Address(Rresult));
3396 
3397   int pack_fields_off = __ offset();
3398 
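  // The pack and unpack loops below walk the extended signature and the
  // return-register array in lock step: T_METADATA entries consume no
  // register, and the T_VOID half following a T_LONG or T_DOUBLE advances j
  // a second time. j starts at 1 because regs->at(0) appears to describe the
  // buffered oop itself rather than a field (a reading of the code, checked
  // by the asserts below). E.g. for fields (int, long) the signature would
  // be roughly [T_METADATA, T_INT, T_LONG, T_VOID] and j steps 1 -> 2 -> 4,
  // matching regs->length() == 4.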
3399   int j = 1;
3400   for (int i = 0; i < sig_vk->length(); i++) {
3401     BasicType bt = sig_vk->at(i)._bt;
3402     if (bt == T_METADATA) {
3403       continue;
3404     }
3405     if (bt == T_VOID) {
3406       if (sig_vk->at(i-1)._bt == T_LONG ||
3407           sig_vk->at(i-1)._bt == T_DOUBLE) {
3408         j++;
3409       }
3410       continue;
3411     }
3412     int off = sig_vk->at(i)._offset;
3413     VMRegPair pair = regs->at(j);
3414     VMReg r_1 = pair.first();
3415     VMReg r_2 = pair.second();
3416     Address to(r0, off);
3417     if (bt == T_FLOAT) {
3418       __ strs(r_1->as_FloatRegister(), to);
3419     } else if (bt == T_DOUBLE) {
3420       __ strd(r_1->as_FloatRegister(), to);
3421     } else {
3422       Register val = r_1->as_Register();
3423       assert_different_registers(to.base(), val, r15, r16, r17);
3424       if (is_reference_type(bt)) {
3425         __ store_heap_oop(to, val, r15, r16, r17, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
3426       } else {
3427         __ store_sized_value(to, r_1->as_Register(), type2aelembytes(bt));
3428       }
3429     }
3430     j++;
3431   }
3432   assert(j == regs->length(), "missed a field?");
3433 
3434   __ ret(lr);
3435 
3436   int unpack_fields_off = __ offset();
3437 
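  // The buffered oop is expected in r0 on entry (a reading of the code, not
  // a documented contract); if it is null there are no fields to load.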
3438   Label skip;
3439   __ cbz(r0, skip);
3440 
3441   j = 1;
3442   for (int i = 0; i < sig_vk->length(); i++) {
3443     BasicType bt = sig_vk->at(i)._bt;
3444     if (bt == T_METADATA) {
3445       continue;
3446     }
3447     if (bt == T_VOID) {
3448       if (sig_vk->at(i-1)._bt == T_LONG ||
3449           sig_vk->at(i-1)._bt == T_DOUBLE) {
3450         j++;
3451       }
3452       continue;
3453     }
3454     int off = sig_vk->at(i)._offset;
3455     assert(off > 0, "offset in object should be positive");
3456     VMRegPair pair = regs->at(j);
3457     VMReg r_1 = pair.first();
3458     VMReg r_2 = pair.second();
3459     Address from(r0, off);
3460     if (bt == T_FLOAT) {
3461       __ ldrs(r_1->as_FloatRegister(), from);
3462     } else if (bt == T_DOUBLE) {
3463       __ ldrd(r_1->as_FloatRegister(), from);
3464     } else if (bt == T_OBJECT || bt == T_ARRAY) {
3465       assert_different_registers(r0, r_1->as_Register());
3466       __ load_heap_oop(r_1->as_Register(), from, rscratch1, rscratch2);
3467     } else {
3468       assert(is_java_primitive(bt), "unexpected basic type");
3469       assert_different_registers(r0, r_1->as_Register());
3470 
3471       size_t size_in_bytes = type2aelembytes(bt);
3472       __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
3473     }
3474     j++;
3475   }
3476   assert(j == regs->length(), "missed a field?");
3477 
3478   __ bind(skip);
3479 
3480   __ ret(lr);
3481 
3482   __ flush();
3483 
3484   return BufferedInlineTypeBlob::create(&buffer, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
3485 }