src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "asm/macroAssembler.hpp"
  29 #include "asm/macroAssembler.inline.hpp"

  30 #include "code/codeCache.hpp"
  31 #include "code/debugInfoRec.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "code/vtableStubs.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/barrierSetAssembler.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "interpreter/interp_masm.hpp"
  38 #include "logging/log.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "nativeInst_aarch64.hpp"
  41 #include "oops/compiledICHolder.hpp"
  42 #include "oops/klass.inline.hpp"
  43 #include "prims/methodHandles.hpp"
  44 #include "runtime/jniHandles.hpp"
  45 #include "runtime/safepointMechanism.hpp"
  46 #include "runtime/sharedRuntime.hpp"
  47 #include "runtime/signature.hpp"
  48 #include "runtime/stubRoutines.hpp"
  49 #include "runtime/vframeArray.hpp"

 340     case T_SHORT:
 341     case T_INT:
 342       if (int_args < Argument::n_int_register_parameters_j) {
 343         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
 344       } else {
 345         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 346         stk_args += 2;
 347       }
 348       break;
 349     case T_VOID:
 350       // halves of T_LONG or T_DOUBLE
 351       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 352       regs[i].set_bad();
 353       break;
 354     case T_LONG:
 355       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 356       // fall through
 357     case T_OBJECT:
 358     case T_ARRAY:
 359     case T_ADDRESS:

 360       if (int_args < Argument::n_int_register_parameters_j) {
 361         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
 362       } else {
 363         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 364         stk_args += 2;
 365       }
 366       break;
 367     case T_FLOAT:
 368       if (fp_args < Argument::n_float_register_parameters_j) {
 369         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
 370       } else {
 371         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 372         stk_args += 2;
 373       }
 374       break;
 375     case T_DOUBLE:
 376       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 377       if (fp_args < Argument::n_float_register_parameters_j) {
 378         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 379       } else {
 380         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 381         stk_args += 2;
 382       }
 383       break;
 384     default:
 385       ShouldNotReachHere();
 386       break;
 387     }
 388   }
 389 
 390   return align_up(stk_args, 2);
 391 }
 392 
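// Illustrative example (not from the original source; it assumes INT_ArgReg
// holds j_rarg0..j_rarg7 and FP_ArgReg holds j_farg0..j_farg7 as elsewhere in
// this file): for a Java signature (int, long, Object, float), i.e. sig_bt of
// { T_INT, T_LONG, T_VOID, T_OBJECT, T_FLOAT }, the loop above would assign
//   T_INT    -> INT_ArgReg[0]
//   T_LONG   -> INT_ArgReg[1]   (the trailing T_VOID half is set_bad())
//   T_OBJECT -> INT_ArgReg[2]
//   T_FLOAT  -> FP_ArgReg[0]
// and only once the register arrays are exhausted do arguments spill to the
// stack, two 32-bit VMRegImpl slots (one 64-bit word) per argument.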
 393 // Patch the caller's call site with the entry to compiled code, if it exists.
 394 static void patch_callers_callsite(MacroAssembler *masm) {
 395   Label L;
 396   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 397   __ cbz(rscratch1, L);
 398 
 399   __ enter();
 400   __ push_CPU_state();
 401 
 402   // VM needs caller's callsite
 403   // VM needs target method
 404   // This needs to be a long call since we will relocate this adapter to
 405   // the codeBuffer and it may not reach
 406 
 407 #ifndef PRODUCT
 408   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
 409 #endif
 410 
 411   __ mov(c_rarg0, rmethod);
 412   __ mov(c_rarg1, lr);
 413   __ authenticate_return_address(c_rarg1, rscratch1);
 414   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 415   __ blr(rscratch1);
 416 
 417   // Explicit isb required because fixup_callers_callsite may change the code
 418   // stream.
 419   __ safepoint_isb();
 420 
 421   __ pop_CPU_state();
 422   // restore sp
 423   __ leave();
 424   __ bind(L);
 425 }
 426 
 427 static void gen_c2i_adapter(MacroAssembler *masm,
 428                             int total_args_passed,
 429                             int comp_args_on_stack,
 430                             const BasicType *sig_bt,
 431                             const VMRegPair *regs,
 432                             Label& skip_fixup) {
 433   // Before we get into the guts of the C2I adapter, see if we should be here
 434   // at all.  We've come from compiled code and are attempting to jump to the
 435   // interpreter, which means the caller made a static call to get here
 436   // (vcalls always get a compiled target if there is one).  Check for a
 437   // compiled target.  If there is one, we need to patch the caller's call.
 438   patch_callers_callsite(masm);
 439 
 440   __ bind(skip_fixup);
 441 
 442   int words_pushed = 0;
 443 
 444   // Since all args are passed on the stack, total_args_passed *
 445   // Interpreter::stackElementSize is the space we need.
 446 
 447   int extraspace = total_args_passed * Interpreter::stackElementSize;

 448 
 449   __ mov(r13, sp);


 450 
 451   // stack is aligned, keep it that way
 452   extraspace = align_up(extraspace, 2*wordSize);
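  // Illustrative arithmetic (not from the original source): with three
  // interpreter arguments and the usual 8-byte Interpreter::stackElementSize,
  // extraspace is 3 * 8 = 24 bytes, rounded up here to 32 so that the
  // 16-byte stack alignment is preserved.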

 453 
 454   if (extraspace)
 455     __ sub(sp, sp, extraspace);
 456 
 457   // Now write the args into the outgoing interpreter space
 458   for (int i = 0; i < total_args_passed; i++) {
 459     if (sig_bt[i] == T_VOID) {
 460       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 461       continue;
 462     }
 463 
 464     // offset to start parameters
 465     int st_off   = (total_args_passed - i - 1) * Interpreter::stackElementSize;
 466     int next_off = st_off - Interpreter::stackElementSize;
 467 
 468     // Say 4 args:
 469     // i   st_off
 470     // 0   32 T_LONG
 471     // 1   24 T_VOID
 472     // 2   16 T_OBJECT
 473     // 3    8 T_BOOL
 474     // -    0 return address
 475     //
 476     // However, to make things extra confusing: because we can fit a Java long/double in
 477     // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
 478     // leaves one slot empty and only stores to a single slot. In this case the
 479     // slot that is occupied is the T_VOID slot. See, I said it was confusing.
 480 
 481     VMReg r_1 = regs[i].first();
 482     VMReg r_2 = regs[i].second();
 483     if (!r_1->is_valid()) {
 484       assert(!r_2->is_valid(), "");
 485       continue;
 486     }
 487     if (r_1->is_stack()) {
 488       // memory to memory use rscratch1
 489       int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
 490                     + extraspace
 491                     + words_pushed * wordSize);
 492       if (!r_2->is_valid()) {
 493         // sign extend??
 494         __ ldrw(rscratch1, Address(sp, ld_off));
 495         __ str(rscratch1, Address(sp, st_off));
 496 
 497       } else {
 498 
 499         __ ldr(rscratch1, Address(sp, ld_off));
 500 
 501         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 502         // T_DOUBLE and T_LONG use two slots in the interpreter
 503         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 504           // ld_off == LSW, ld_off+wordSize == MSW
 505           // st_off == MSW, next_off == LSW
 506           __ str(rscratch1, Address(sp, next_off));
 507 #ifdef ASSERT
 508           // Overwrite the unused slot with known junk
 509           __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaaaull);
 510           __ str(rscratch1, Address(sp, st_off));
 511 #endif /* ASSERT */
 512         } else {
 513           __ str(rscratch1, Address(sp, st_off));
 514         }
 515       }
 516     } else if (r_1->is_Register()) {
 517       Register r = r_1->as_Register();
 518       if (!r_2->is_valid()) {
 519         // must be only an int (or smaller), so move only 32 bits to the slot
 520         // why not sign extend??
 521         __ str(r, Address(sp, st_off));
 522       } else {
 523         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 524         // T_DOUBLE and T_LONG use two slots in the interpreter
 525         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 526           // jlong/double in gpr
 527 #ifdef ASSERT
 528           // Overwrite the unused slot with known junk
 529           __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaabull);
 530           __ str(rscratch1, Address(sp, st_off));
 531 #endif /* ASSERT */
 532           __ str(r, Address(sp, next_off));
 533         } else {
 534           __ str(r, Address(sp, st_off));
 535         }
 536       }
 537     } else {
 538       assert(r_1->is_FloatRegister(), "");
 539       if (!r_2->is_valid()) {
 540         // only a float, use just part of the slot
 541         __ strs(r_1->as_FloatRegister(), Address(sp, st_off));
 542       } else {
 543 #ifdef ASSERT
 544         // Overwrite the unused slot with known junk
 545         __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaacull);
 546         __ str(rscratch1, Address(sp, st_off));
 547 #endif /* ASSERT */
 548         __ strd(r_1->as_FloatRegister(), Address(sp, next_off));
 549       }
 550     }
 551   }
 552 
 553   __ mov(esp, sp); // Interp expects args on caller's expression stack
 554 
 555   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
 556   __ br(rscratch1);
 557 }
 558 

 559 
 560 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
 561                                     int total_args_passed,
 562                                     int comp_args_on_stack,
 563                                     const BasicType *sig_bt,
 564                                     const VMRegPair *regs) {
 565 
 566   // Note: r13 contains the senderSP on entry. We must preserve it since
 567   // we may do a i2c -> c2i transition if we lose a race where compiled
 568   // code goes non-entrant while we get args ready.
 569 
 570   // In addition we use r13 to locate all the interpreter args because
 571   // we must align the stack to 16 bytes.
 572 
 573   // Adapters are frameless.
 574 
 575   // An i2c adapter is frameless because the *caller* frame, which is
 576   // interpreted, routinely repairs its own esp (from
 577   // interpreter_frame_last_sp), even if a callee has modified the
 578   // stack pointer.  It also recalculates and aligns sp.
 579 
 580   // A c2i adapter is frameless because the *callee* frame, which is
 581   // interpreted, routinely repairs its caller's sp (from sender_sp,
 582   // which is set up via the senderSP register).
 583 
 584   // In other words, if *either* the caller or callee is interpreted, we can

 604       range_check(masm, rax, r11,
 605                   Interpreter::code()->code_start(), Interpreter::code()->code_end(),
 606                   L_ok);
 607     if (StubRoutines::code1() != NULL)
 608       range_check(masm, rax, r11,
 609                   StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
 610                   L_ok);
 611     if (StubRoutines::code2() != NULL)
 612       range_check(masm, rax, r11,
 613                   StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
 614                   L_ok);
 615     const char* msg = "i2c adapter must return to an interpreter frame";
 616     __ block_comment(msg);
 617     __ stop(msg);
 618     __ bind(L_ok);
 619     __ block_comment("} verify_i2ce ");
 620 #endif
 621   }
 622 
 623   // Cut-out for having no stack args.
 624   int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
 625   if (comp_args_on_stack) {
 626     __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
 627     __ andr(sp, rscratch1, -16);

 628   }
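  // Illustrative arithmetic (not from the original source): for
  // comp_args_on_stack == 3, the outgoing area is 3 * 4 = 12 bytes, rounded
  // up to 2 words (16 bytes); sp is lowered by that amount and then
  // re-aligned down to a 16-byte boundary.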
 629 
 630   // Will jump to the compiled code just as if compiled code was doing it.
 631   // Pre-load the register-jump target early, to schedule it better.
 632   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));
 633 
 634 #if INCLUDE_JVMCI
 635   if (EnableJVMCI) {
 636     // check if this call should be routed towards a specific entry point
 637     __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 638     Label no_alternative_target;
 639     __ cbz(rscratch2, no_alternative_target);
 640     __ mov(rscratch1, rscratch2);
 641     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 642     __ bind(no_alternative_target);
 643   }
 644 #endif // INCLUDE_JVMCI
 645 


 646   // Now generate the shuffle code.
 647   for (int i = 0; i < total_args_passed; i++) {
 648     if (sig_bt[i] == T_VOID) {
 649       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 650       continue;
 651     }
 652 
 653     // Pick up 0, 1 or 2 words from SP+offset.

 654 
 655     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
 656             "scrambled load targets?");
 657     // Load in argument order going down.
 658     int ld_off = (total_args_passed - i - 1)*Interpreter::stackElementSize;
 659     // Point to interpreter value (vs. tag)
 660     int next_off = ld_off - Interpreter::stackElementSize;
 661     //
 662     //
 663     //
 664     VMReg r_1 = regs[i].first();
 665     VMReg r_2 = regs[i].second();
 666     if (!r_1->is_valid()) {
 667       assert(!r_2->is_valid(), "");
 668       continue;
 669     }
 670     if (r_1->is_stack()) {
 671       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 672       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
 673       if (!r_2->is_valid()) {
 674         // sign extend???
 675         __ ldrsw(rscratch2, Address(esp, ld_off));
 676         __ str(rscratch2, Address(sp, st_off));
 677       } else {
 678         //
 679         // We are using two optoregs. This can be either T_OBJECT,
 680         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
 681         // two slots but only uses one for the T_LONG or T_DOUBLE case,
 682         // so we must adjust where to pick up the data to match the
 683         // interpreter.
 684         //
 685         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
 686         // are accessed as negative so LSW is at LOW address
 687 
 688         // ld_off is MSW so get LSW
 689         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 690                            next_off : ld_off;
 691         __ ldr(rscratch2, Address(esp, offset));
 692         // st_off is LSW (i.e. reg.first())
 693         __ str(rscratch2, Address(sp, st_off));
 694       }
 695     } else if (r_1->is_Register()) {  // Register argument
 696       Register r = r_1->as_Register();
 697       if (r_2->is_valid()) {
 698         //
 699         // We are using two VMRegs. This can be either T_OBJECT,
 700         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
 701         // two slots but only uses one for the T_LONG or T_DOUBLE case,
 702         // so we must adjust where to pick up the data to match the
 703         // interpreter.
 704 
 705         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 706                            next_off : ld_off;
 707 
 708         // this can be a misaligned move
 709         __ ldr(r, Address(esp, offset));
 710       } else {
 711         // sign extend and use a full word?
 712         __ ldrw(r, Address(esp, ld_off));
 713       }
 714     } else {
 715       if (!r_2->is_valid()) {
 716         __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
 717       } else {
 718         __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
 719       }
 720     }
 721   }
 722 
 723   // 6243940 We might end up in handle_wrong_method if
 724   // the callee is deoptimized as we race through here. If that
 725   // happens we don't want to take a safepoint because the
 726   // caller frame will look interpreted and arguments are now
 727   // "compiled" so it is much better to make this transition
 728   // invisible to the stack walking code. Unfortunately if
 729   // we try to find the callee by normal means a safepoint
 730   // is possible. So we stash the desired callee in the thread
 731   // and the VM will find it there should this case occur.
 732 
 733   __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
 734 
 735   __ br(rscratch1);
 736 }
 737 
 738 // ---------------------------------------------------------------
 739 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 740                                                             int total_args_passed,
 741                                                             int comp_args_on_stack,
 742                                                             const BasicType *sig_bt,
 743                                                             const VMRegPair *regs,
 744                                                             AdapterFingerPrint* fingerprint) {
 745   address i2c_entry = __ pc();
 746 
 747   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
 748 
 749   address c2i_unverified_entry = __ pc();
 750   Label skip_fixup;
 751 
 752   Label ok;
 753 
 754   Register holder = rscratch2;
 755   Register receiver = j_rarg0;
 756   Register tmp = r10;  // A call-clobbered register not used for arg passing
 757 
 758   // -------------------------------------------------------------------------
 759   // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
 760   // to the interpreter.  The args start out packed in the compiled layout.  They
 761   // need to be unpacked into the interpreter layout.  This will almost always
 762   // require some stack space.  We grow the current (compiled) stack, then repack
 763   // the args.  We  finally end in a jump to the generic interpreter entry point.
 764   // On exit from the interpreter, the interpreter will restore our SP (lest the
 765   // compiled code, which relies solely on SP and not FP, get sick).
 766 
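  // Note (assumption for clarity, not in the original source): the block
  // below expects the compiled caller's inline cache to have loaded the
  // CompiledICHolder* into rscratch2 ("holder"); it compares the receiver's
  // klass with holder_klass and jumps to the ic-miss stub on a mismatch.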
 767   {
 768     __ block_comment("c2i_unverified_entry {");
 769     __ load_klass(rscratch1, receiver);
 770     __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
 771     __ cmp(rscratch1, tmp);
 772     __ ldr(rmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
 773     __ br(Assembler::EQ, ok);
 774     __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 775 
 776     __ bind(ok);
 777     // Method might have been compiled since the call site was patched to
 778     // interpreted; if that is the case treat it as a miss so we can get
 779     // the call site corrected.
 780     __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 781     __ cbz(rscratch1, skip_fixup);
 782     __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 783     __ block_comment("} c2i_unverified_entry");
 784   }
 785 

 786   address c2i_entry = __ pc();
 787 
 788   // Class initialization barrier for static methods
 789   address c2i_no_clinit_check_entry = NULL;
 790   if (VM_Version::supports_fast_class_init_checks()) {
 791     Label L_skip_barrier;
 792 
 793     { // Bypass the barrier for non-static methods
 794       __ ldrw(rscratch1, Address(rmethod, Method::access_flags_offset()));
 795       __ andsw(zr, rscratch1, JVM_ACC_STATIC);
 796       __ br(Assembler::EQ, L_skip_barrier); // non-static
 797     }
 798 
 799     __ load_method_holder(rscratch2, rmethod);
 800     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
 801     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 802 
 803     __ bind(L_skip_barrier);
 804     c2i_no_clinit_check_entry = __ pc();
 805   }
 806 
 807   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 808   bs->c2i_entry_barrier(masm);
 809 
 810   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 811 
 812   __ flush();
 813   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
 814 }
 815 
 816 static int c_calling_convention_priv(const BasicType *sig_bt,
 817                                          VMRegPair *regs,
 818                                          VMRegPair *regs2,
 819                                          int total_args_passed) {
 820   assert(regs2 == NULL, "not needed on AArch64");
 821 
 822 // We return the number of VMRegImpl stack slots we need to reserve for all
 823 // the arguments NOT counting out_preserve_stack_slots.
 824 
 825     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
 826       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5,  c_rarg6,  c_rarg7
 827     };
 828     static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
 829       c_farg0, c_farg1, c_farg2, c_farg3,
 830       c_farg4, c_farg5, c_farg6, c_farg7
 831     };
 832 
 833     uint int_args = 0;

 841       case T_BYTE:
 842       case T_SHORT:
 843       case T_INT:
 844         if (int_args < Argument::n_int_register_parameters_c) {
 845           regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
 846         } else {
 847 #ifdef __APPLE__
 848           // Less-than word types are stored one after another.
 849           // The code is unable to handle this so bailout.
 850           return -1;
 851 #endif
 852           regs[i].set1(VMRegImpl::stack2reg(stk_args));
 853           stk_args += 2;
 854         }
 855         break;
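      // Note (assumption, not in the original source): the Apple AArch64 C ABI
      // packs sub-word stack arguments at their natural size and alignment
      // (e.g. two ints passed on the stack take 4 bytes each, back to back),
      // while this generic code models every stack argument as a full 2-slot
      // (8-byte) word, hence the return -1 bailouts here and in the T_FLOAT
      // case below.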
 856       case T_LONG:
 857         assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 858         // fall through
 859       case T_OBJECT:
 860       case T_ARRAY:

 861       case T_ADDRESS:
 862       case T_METADATA:
 863         if (int_args < Argument::n_int_register_parameters_c) {
 864           regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
 865         } else {
 866           regs[i].set2(VMRegImpl::stack2reg(stk_args));
 867           stk_args += 2;
 868         }
 869         break;
 870       case T_FLOAT:
 871         if (fp_args < Argument::n_float_register_parameters_c) {
 872           regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
 873         } else {
 874 #ifdef __APPLE__
 875           // Less-than word types are stored one after another.
 876           // The code is unable to handle this so bailout.
 877           return -1;
 878 #endif
 879           regs[i].set1(VMRegImpl::stack2reg(stk_args));
 880           stk_args += 2;

1515   int temploc = -1;
1516   for (int ai = 0; ai < arg_order.length(); ai += 2) {
1517     int i = arg_order.at(ai);
1518     int c_arg = arg_order.at(ai + 1);
1519     __ block_comment(err_msg("move %d -> %d", i, c_arg));
1520     assert(c_arg != -1 && i != -1, "wrong order");
1521 #ifdef ASSERT
1522     if (in_regs[i].first()->is_Register()) {
1523       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1524     } else if (in_regs[i].first()->is_FloatRegister()) {
1525       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1526     }
1527     if (out_regs[c_arg].first()->is_Register()) {
1528       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1529     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1530       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1531     }
1532 #endif /* ASSERT */
1533     switch (in_sig_bt[i]) {
1534       case T_ARRAY:

1535       case T_OBJECT:
1536         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1537                     ((i == 0) && (!is_static)),
1538                     &receiver_offset);
1539         int_args++;
1540         break;
1541       case T_VOID:
1542         break;
1543 
1544       case T_FLOAT:
1545         float_move(masm, in_regs[i], out_regs[c_arg]);
1546         float_args++;
1547         break;
1548 
1549       case T_DOUBLE:
1550         assert( i + 1 < total_in_args &&
1551                 in_sig_bt[i + 1] == T_VOID &&
1552                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1553         double_move(masm, in_regs[i], out_regs[c_arg]);
1554         float_args++;

1630   Label lock_done;
1631 
1632   if (method->is_synchronized()) {
1633 
1634     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1635 
1636     // Get the handle (the 2nd argument)
1637     __ mov(oop_handle_reg, c_rarg1);
1638 
1639     // Get address of the box
1640 
1641     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1642 
1643     // Load the oop from the handle
1644     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1645 
1646     if (!UseHeavyMonitors) {
1647       // Load (object->mark() | 1) into swap_reg %r0
1648       __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1649       __ orr(swap_reg, rscratch1, 1);
1650 
1651       // Save (object->mark() | 1) into BasicLock's displaced header
1652       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1653 
1654       // src -> dest iff dest == r0 else r0 <- dest
1655       { Label here;
1656         __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
1657       }
1658 
1659       // Hmm should this move to the slow path code area???
1660 
1661       // Test if the oopMark is an obvious stack pointer, i.e.,
1662       //  1) (mark & 3) == 0, and
1663       //  2) sp <= mark < mark + os::pagesize()
1664       // These 3 tests can be done by evaluating the following
1665       // expression: ((mark - sp) & (3 - os::vm_page_size())),
1666       // assuming both stack pointer and pagesize have their
1667       // least significant 2 bits clear.
1668       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1669 

1695 
1696   rt_call(masm, native_func);
1697 
1698   __ bind(native_return);
1699 
1700   intptr_t return_pc = (intptr_t) __ pc();
1701   oop_maps->add_gc_map(return_pc - start, map);
1702 
1703   // Unpack native results.
1704   switch (ret_type) {
1705   case T_BOOLEAN: __ c2bool(r0);                     break;
1706   case T_CHAR   : __ ubfx(r0, r0, 0, 16);            break;
1707   case T_BYTE   : __ sbfx(r0, r0, 0, 8);             break;
1708   case T_SHORT  : __ sbfx(r0, r0, 0, 16);            break;
1709   case T_INT    : __ sbfx(r0, r0, 0, 32);            break;
1710   case T_DOUBLE :
1711   case T_FLOAT  :
1712     // Result is in v0; we'll save it as needed
1713     break;
1714   case T_ARRAY:                 // Really a handle

1715   case T_OBJECT:                // Really a handle
1716       break; // can't de-handlize until after safepoint check
1717   case T_VOID: break;
1718   case T_LONG: break;
1719   default       : ShouldNotReachHere();
1720   }
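  // Reading of the cases above (illustrative, not in the original source):
  // c2bool() normalizes the low byte of r0 to exactly 0 or 1, ubfx
  // zero-extends the unsigned 16-bit char, and sbfx sign-extends the
  // byte/short/int result to the full register, so callers see properly
  // widened Java values.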
1721 
1722   Label safepoint_in_progress, safepoint_in_progress_done;
1723   Label after_transition;
1724 
1725   // Switch thread to "native transition" state before reading the synchronization state.
1726   // This additional state is necessary because reading and testing the synchronization
1727   // state is not atomic w.r.t. GC, as this scenario demonstrates:
1728   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1729   //     VM thread changes sync state to synchronizing and suspends threads for GC.
1730   //     Thread A is resumed to finish this native method, but doesn't block here since it
1731   //     didn't see any synchronization in progress, and escapes.
1732   __ mov(rscratch1, _thread_in_native_trans);
1733 
1734   __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));

2937 #ifdef ASSERT
2938   __ str(zr, Address(rthread, JavaThread::exception_handler_pc_offset()));
2939   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
2940 #endif
2941   // Clear the exception oop so GC no longer processes it as a root.
2942   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
2943 
2944   // r0: exception oop
2945   // r8:  exception handler
2946   // r4: exception pc
2947   // Jump to handler
2948 
2949   __ br(r8);
2950 
2951   // Make sure all code is generated
2952   masm->flush();
2953 
2954   // Set exception blob
2955   _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
2956 }
2957 
2958 // ---------------------------------------------------------------
2959 
2960 class NativeInvokerGenerator : public StubCodeGenerator {
2961   address _call_target;
2962   int _shadow_space_bytes;
2963 
2964   const GrowableArray<VMReg>& _input_registers;
2965   const GrowableArray<VMReg>& _output_registers;
2966 
2967   int _frame_complete;
2968   int _framesize;
2969   OopMapSet* _oop_maps;
2970 public:
2971   NativeInvokerGenerator(CodeBuffer* buffer,
2972                          address call_target,
2973                          int shadow_space_bytes,
2974                          const GrowableArray<VMReg>& input_registers,
2975                          const GrowableArray<VMReg>& output_registers)
2976    : StubCodeGenerator(buffer, PrintMethodHandleStubs),

3189 
3190   //////////////////////////////////////////////////////////////////////////////
3191 
3192   __ block_comment("{ L_reguard");
3193   __ bind(L_reguard);
3194 
3195   spill_output_registers();
3196 
3197   rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
3198 
3199   fill_output_registers();
3200 
3201   __ b(L_after_reguard);
3202 
3203   __ block_comment("} L_reguard");
3204 
3205   //////////////////////////////////////////////////////////////////////////////
3206 
3207   __ flush();
3208 }
3209 #endif // COMPILER2

  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "asm/macroAssembler.hpp"
  29 #include "asm/macroAssembler.inline.hpp"
  30 #include "classfile/symbolTable.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "code/debugInfoRec.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/oopMap.hpp"
  36 #include "gc/shared/barrierSetAssembler.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "interpreter/interp_masm.hpp"
  39 #include "logging/log.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "nativeInst_aarch64.hpp"
  42 #include "oops/compiledICHolder.hpp"
  43 #include "oops/klass.inline.hpp"
  44 #include "prims/methodHandles.hpp"
  45 #include "runtime/jniHandles.hpp"
  46 #include "runtime/safepointMechanism.hpp"
  47 #include "runtime/sharedRuntime.hpp"
  48 #include "runtime/signature.hpp"
  49 #include "runtime/stubRoutines.hpp"
  50 #include "runtime/vframeArray.hpp"

 341     case T_SHORT:
 342     case T_INT:
 343       if (int_args < Argument::n_int_register_parameters_j) {
 344         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
 345       } else {
 346         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 347         stk_args += 2;
 348       }
 349       break;
 350     case T_VOID:
 351       // halves of T_LONG or T_DOUBLE
 352       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 353       regs[i].set_bad();
 354       break;
 355     case T_LONG:
 356       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 357       // fall through
 358     case T_OBJECT:
 359     case T_ARRAY:
 360     case T_ADDRESS:
 361     case T_PRIMITIVE_OBJECT:
 362       if (int_args < Argument::n_int_register_parameters_j) {
 363         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
 364       } else {
 365         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 366         stk_args += 2;
 367       }
 368       break;
 369     case T_FLOAT:
 370       if (fp_args < Argument::n_float_register_parameters_j) {
 371         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
 372       } else {
 373         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 374         stk_args += 2;
 375       }
 376       break;
 377     case T_DOUBLE:
 378       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 379       if (fp_args < Argument::n_float_register_parameters_j) {
 380         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 381       } else {
 382         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 383         stk_args += 2;
 384       }
 385       break;
 386     default:
 387       ShouldNotReachHere();
 388       break;
 389     }
 390   }
 391 
 392   return align_up(stk_args, 2);
 393 }
 394 
 395 
 396 const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j;
 397 const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
 398 
 399 int SharedRuntime::java_return_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) {
 400 
 401   // Create the mapping between argument positions and registers.
 402 
 403   static const Register INT_ArgReg[java_return_convention_max_int] = {
 404     r0 /* j_rarg7 */, j_rarg6, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0
 405   };
 406 
 407   static const FloatRegister FP_ArgReg[java_return_convention_max_float] = {
 408     j_farg0, j_farg1, j_farg2, j_farg3, j_farg4, j_farg5, j_farg6, j_farg7
 409   };
 410 
 411   uint int_args = 0;
 412   uint fp_args = 0;
 413 
 414   for (int i = 0; i < total_args_passed; i++) {
 415     switch (sig_bt[i]) {
 416     case T_BOOLEAN:
 417     case T_CHAR:
 418     case T_BYTE:
 419     case T_SHORT:
 420     case T_INT:
 421       if (int_args < SharedRuntime::java_return_convention_max_int) {
 422         regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
 423         int_args ++;
 424       } else {
 425         return -1;
 426       }
 427       break;
 428     case T_VOID:
 429       // halves of T_LONG or T_DOUBLE
 430       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 431       regs[i].set_bad();
 432       break;
 433     case T_LONG:
 434       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 435       // fall through
 436     case T_OBJECT:
 437     case T_ARRAY:
 438     case T_ADDRESS:
 439       // Should T_METADATA be added to java_calling_convention as well ?
 440     case T_METADATA:
 441     case T_PRIMITIVE_OBJECT:
 442       if (int_args < SharedRuntime::java_return_convention_max_int) {
 443         regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
 444         int_args ++;
 445       } else {
 446         return -1;
 447       }
 448       break;
 449     case T_FLOAT:
 450       if (fp_args < SharedRuntime::java_return_convention_max_float) {
 451         regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
 452         fp_args ++;
 453       } else {
 454         return -1;
 455       }
 456       break;
 457     case T_DOUBLE:
 458       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 459       if (fp_args < SharedRuntime::java_return_convention_max_float) {
 460         regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
 461         fp_args ++;
 462       } else {
 463         return -1;
 464       }
 465       break;
 466     default:
 467       ShouldNotReachHere();
 468       break;
 469     }
 470   }
 471 
 472   return int_args + fp_args;
 473 }
 474 
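// Illustrative summary (not from the original source): java_return_convention
// maps the flattened fields of an inline-type return value onto return
// registers; for two T_INT fields it would use INT_ArgReg[0] (r0) and
// INT_ArgReg[1] (j_rarg6) and return 2, while -1 means the fields do not all
// fit in registers and the scalarized return convention cannot be used for
// that signature.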
 475 // Patch the caller's call site with the entry to compiled code, if it exists.
 476 static void patch_callers_callsite(MacroAssembler *masm) {
 477   Label L;
 478   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 479   __ cbz(rscratch1, L);
 480 
 481   __ enter();
 482   __ push_CPU_state();
 483 
 484   // VM needs caller's callsite
 485   // VM needs target method
 486   // This needs to be a long call since we will relocate this adapter to
 487   // the codeBuffer and it may not reach
 488 
 489 #ifndef PRODUCT
 490   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
 491 #endif
 492 
 493   __ mov(c_rarg0, rmethod);
 494   __ mov(c_rarg1, lr);
 495   __ authenticate_return_address(c_rarg1, rscratch1);
 496   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 497   __ blr(rscratch1);
 498 
 499   // Explicit isb required because fixup_callers_callsite may change the code
 500   // stream.
 501   __ safepoint_isb();
 502 
 503   __ pop_CPU_state();
 504   // restore sp
 505   __ leave();
 506   __ bind(L);
 507 }
 508 
 509 // For each inline type argument, sig includes the list of fields of
 510 // the inline type. This utility function computes the number of
 511 // arguments for the call if inline types are passed by reference (the
 512 // calling convention the interpreter expects).
 513 static int compute_total_args_passed_int(const GrowableArray<SigEntry>* sig_extended) {
 514   int total_args_passed = 0;
 515   if (InlineTypePassFieldsAsArgs) {
 516      for (int i = 0; i < sig_extended->length(); i++) {
 517        BasicType bt = sig_extended->at(i)._bt;
 518        if (bt == T_PRIMITIVE_OBJECT) {
 519          // In sig_extended, an inline type argument starts with:
 520          // T_PRIMITIVE_OBJECT, followed by the types of the fields of the
 521          // inline type and T_VOID to mark the end of the value
 522          // type. Inline types are flattened so, for instance, in the
 523          // case of an inline type with an int field and an inline type
 524          // field that itself has 2 fields, an int and a long:
 525          // T_PRIMITIVE_OBJECT T_INT T_PRIMITIVE_OBJECT T_INT T_LONG T_VOID (second
 526          // slot for the T_LONG) T_VOID (inner T_PRIMITIVE_OBJECT) T_VOID
 527          // (outer T_PRIMITIVE_OBJECT)
 528          total_args_passed++;
 529          int vt = 1;
 530          do {
 531            i++;
 532            BasicType bt = sig_extended->at(i)._bt;
 533            BasicType prev_bt = sig_extended->at(i-1)._bt;
 534            if (bt == T_PRIMITIVE_OBJECT) {
 535              vt++;
 536            } else if (bt == T_VOID &&
 537                       prev_bt != T_LONG &&
 538                       prev_bt != T_DOUBLE) {
 539              vt--;
 540            }
 541          } while (vt != 0);
 542        } else {
 543          total_args_passed++;
 544        }
 545      }
 546   } else {
 547     total_args_passed = sig_extended->length();
 548   }
 549 
 550   return total_args_passed;
 551 }
 552 
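// Worked example (illustrative, not from the original source): for the
// sig_extended sequence shown in the comment above, T_PRIMITIVE_OBJECT T_INT
// T_PRIMITIVE_OBJECT T_INT T_LONG T_VOID T_VOID T_VOID, the whole sequence
// collapses into a single interpreter argument, so the function counts it
// as 1.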
 553 
 554 static void gen_c2i_adapter_helper(MacroAssembler* masm,
 555                                    BasicType bt,
 556                                    BasicType prev_bt,
 557                                    size_t size_in_bytes,
 558                                    const VMRegPair& reg_pair,
 559                                    const Address& to,
 560                                    Register tmp1,
 561                                    Register tmp2,
 562                                    Register tmp3,
 563                                    int extraspace,
 564                                    bool is_oop) {
 565   assert(bt != T_PRIMITIVE_OBJECT || !InlineTypePassFieldsAsArgs, "no inline type here");
 566   if (bt == T_VOID) {
 567     assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
 568     return;
 569   }
 570 
 571   // Say 4 args:
 572   // i   st_off
 573   // 0   32 T_LONG
 574   // 1   24 T_VOID
 575   // 2   16 T_OBJECT
 576   // 3    8 T_BOOL
 577   // -    0 return address
 578   //
 579   // However, to make things extra confusing: because we can fit a Java long/double in
 580   // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
 581   // leaves one slot empty and only stores to a single slot. In this case the
 582   // slot that is occupied is the T_VOID slot. See, I said it was confusing.
 583 
 584   bool wide = (size_in_bytes == wordSize);
 585   VMReg r_1 = reg_pair.first();
 586   VMReg r_2 = reg_pair.second();
 587   assert(r_2->is_valid() == wide, "invalid size");
 588   if (!r_1->is_valid()) {
 589     assert(!r_2->is_valid(), "");
 590     return;
 591   }
 592 
 593   if (!r_1->is_FloatRegister()) {
 594     Register val = tmp3;
 595     if (r_1->is_stack()) {
 596       // memory to memory use tmp3 (scratch registers are used by store_heap_oop)
 597       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 598       __ load_sized_value(val, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
 599     } else {
 600       val = r_1->as_Register();
 601     }
 602     assert_different_registers(to.base(), val, rscratch2, tmp1, tmp2);
 603     if (is_oop) {
 604       __ store_heap_oop(to, val, rscratch2, tmp1, tmp2, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
 605     } else {
 606       __ store_sized_value(to, val, size_in_bytes);
 607     }
 608   } else {
 609     if (wide) {
 610       __ strd(r_1->as_FloatRegister(), to);
 611     } else {
 612       // only a float, use just part of the slot
 613       __ strs(r_1->as_FloatRegister(), to);
 614     }
 615   }
 616 }
 617 
 618 static void gen_c2i_adapter(MacroAssembler *masm,
 619                             const GrowableArray<SigEntry>* sig_extended,
 620                             const VMRegPair *regs,
 621                             Label& skip_fixup,
 622                             address start,
 623                             OopMapSet* oop_maps,
 624                             int& frame_complete,
 625                             int& frame_size_in_words,
 626                             bool alloc_inline_receiver) {
 627 
 628   // Before we get into the guts of the C2I adapter, see if we should be here
 629   // at all.  We've come from compiled code and are attempting to jump to the
 630   // interpreter, which means the caller made a static call to get here
 631   // (vcalls always get a compiled target if there is one).  Check for a
 632   // compiled target.  If there is one, we need to patch the caller's call.
 633   patch_callers_callsite(masm);
 634 
 635   __ bind(skip_fixup);
 636 
 637   // Name some registers to be used in the following code. We can use
 638   // anything except r0-r7 which are arguments in the Java calling
 639   // convention, rmethod (r12), and r13 which holds the outgoing sender
 640   // SP for the interpreter.
 641   Register buf_array = r10;   // Array of buffered inline types
 642   Register buf_oop = r11;     // Buffered inline type oop
 643   Register tmp1 = r15;
 644   Register tmp2 = r16;
 645   Register tmp3 = r17;
 646 
 647   if (InlineTypePassFieldsAsArgs) {
 648     // Is there an inline type argument?
 649     bool has_inline_argument = false;
 650     for (int i = 0; i < sig_extended->length() && !has_inline_argument; i++) {
 651       has_inline_argument = (sig_extended->at(i)._bt == T_PRIMITIVE_OBJECT);
 652     }
 653     if (has_inline_argument) {
 654       // There is at least an inline type argument: we're coming from
 655       // compiled code so we have no buffers to back the inline types.
 656       // Allocate the buffers here with a runtime call.
 657       RegisterSaver reg_save(false /* save_vectors */);
 658       OopMap* map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
 659 
 660       frame_complete = __ offset();
 661       address the_pc = __ pc();
 662 
 663       Label retaddr;
 664       __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
 665 
 666       __ mov(c_rarg0, rthread);
 667       __ mov(c_rarg1, rmethod);
 668       __ mov(c_rarg2, (int64_t)alloc_inline_receiver);
 669 
 670       __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_inline_types)));
 671       __ blr(rscratch1);
 672       __ bind(retaddr);
 673 
 674       oop_maps->add_gc_map(__ pc() - start, map);
 675       __ reset_last_Java_frame(false);
 676 
 677       reg_save.restore_live_registers(masm);
 678 
 679       Label no_exception;
 680       __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
 681       __ cbz(rscratch1, no_exception);
 682 
 683       __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
 684       __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
 685       __ b(RuntimeAddress(StubRoutines::forward_exception_entry()));
 686 
 687       __ bind(no_exception);
 688 
 689       // We get an array of objects from the runtime call
 690       __ get_vm_result(buf_array, rthread);
 691       __ get_vm_result_2(rmethod, rthread); // TODO: required to keep the callee Method live?
 692     }
 693   }
 694 
 695   // Since all args are passed on the stack, total_args_passed *
 696   // Interpreter::stackElementSize is the space we need.
 697 
 698   int total_args_passed = compute_total_args_passed_int(sig_extended);
 699   int extraspace = total_args_passed * Interpreter::stackElementSize;
 700 
 701   // stack is aligned, keep it that way
 702   extraspace = align_up(extraspace, StackAlignmentInBytes);
 703 
 704   // set senderSP value
 705   __ mov(r13, sp);
 706 
 707   __ sub(sp, sp, extraspace);
 708 
 709   // Now write the args into the outgoing interpreter space
 710 
 711   // next_arg_comp is the next argument from the compiler point of
 712   // view (inline type fields are passed in registers/on the stack). In
 713   // sig_extended, an inline type argument starts with: T_PRIMITIVE_OBJECT,
 714   // followed by the types of the fields of the inline type and T_VOID
 715   // to mark the end of the inline type. ignored counts the number of
 716   // T_PRIMITIVE_OBJECT/T_VOID. next_vt_arg is the next inline type argument:
 717   // used to get the buffer for that argument from the pool of buffers
 718   // we allocated above and want to pass to the
 719   // interpreter. next_arg_int is the next argument from the
 720   // interpreter point of view (inline types are passed by reference).
 721   for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
 722        next_arg_comp < sig_extended->length(); next_arg_comp++) {
 723     assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
 724     assert(next_arg_int <= total_args_passed, "more arguments for the interpreter than expected?");
 725     BasicType bt = sig_extended->at(next_arg_comp)._bt;
 726     int st_off = (total_args_passed - next_arg_int - 1) * Interpreter::stackElementSize;
 727     if (!InlineTypePassFieldsAsArgs || bt != T_PRIMITIVE_OBJECT) {
 728       int next_off = st_off - Interpreter::stackElementSize;
 729       const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
 730       const VMRegPair reg_pair = regs[next_arg_comp-ignored];
 731       size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
 732       gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
 733                              size_in_bytes, reg_pair, Address(sp, offset), tmp1, tmp2, tmp3, extraspace, false);
 734       next_arg_int++;
 735 #ifdef ASSERT
 736       if (bt == T_LONG || bt == T_DOUBLE) {
 737         // Overwrite the unused slot with known junk
 738         __ mov(rscratch1, CONST64(0xdeadffffdeadaaaa));
 739         __ str(rscratch1, Address(sp, st_off));
 740       }
 741 #endif /* ASSERT */
 742     } else {
 743       ignored++;
 744       // get the buffer from the just allocated pool of buffers
 745       int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_PRIMITIVE_OBJECT);
 746       __ load_heap_oop(buf_oop, Address(buf_array, index));
 747       next_vt_arg++; next_arg_int++;
 748       int vt = 1;
 749       // write fields we get from compiled code in registers/stack
 750       // slots to the buffer: we know we are done with that inline type
 751       // argument when we hit the T_VOID that acts as an end of inline
 752       // type delimiter for this inline type. Inline types are flattened
 753       // so we might encounter embedded inline types. Each entry in
 754       // sig_extended contains a field offset in the buffer.
 755       Label L_null;
 756       do {
 757         next_arg_comp++;
 758         BasicType bt = sig_extended->at(next_arg_comp)._bt;
 759         BasicType prev_bt = sig_extended->at(next_arg_comp - 1)._bt;
 760         if (bt == T_PRIMITIVE_OBJECT) {
 761           vt++;
 762           ignored++;
 763         } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
 764           vt--;
 765           ignored++;
 766         } else {
 767           int off = sig_extended->at(next_arg_comp)._offset;
 768           if (off == -1) {
 769             // Nullable inline type argument, emit null check
 770             VMReg reg = regs[next_arg_comp-ignored].first();
 771             Label L_notNull;
 772             if (reg->is_stack()) {
 773               int ld_off = reg->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 774               __ ldr(tmp1, Address(sp, ld_off));
 775               __ cbnz(tmp1, L_notNull);
 776             } else {
 777               __ cbnz(reg->as_Register(), L_notNull);
 778             }
 779             __ str(zr, Address(sp, st_off));
 780             __ b(L_null);
 781             __ bind(L_notNull);
 782             continue;
 783           }
 784           assert(off > 0, "offset in object should be positive");
 785           size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 786           bool is_oop = is_reference_type(bt);
 787           gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
 788                                  size_in_bytes, regs[next_arg_comp-ignored], Address(buf_oop, off), tmp1, tmp2, tmp3, extraspace, is_oop);
 789         }
 790       } while (vt != 0);
 791       // pass the buffer to the interpreter
 792       __ str(buf_oop, Address(sp, st_off));
 793       __ bind(L_null);
 794     }
 795   }
 796 
 797   __ mov(esp, sp); // Interp expects args on caller's expression stack
 798 
 799   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
 800   __ br(rscratch1);
 801 }
 802 
 803 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, int comp_args_on_stack, const GrowableArray<SigEntry>* sig, const VMRegPair *regs) {
 804 
 805 
 806   // Note: r13 contains the senderSP on entry. We must preserve it since
 807   // we may do a i2c -> c2i transition if we lose a race where compiled
 808   // code goes non-entrant while we get args ready.
 809 
 810   // In addition we use r13 to locate all the interpreter args because
 811   // we must align the stack to 16 bytes.
 812 
 813   // Adapters are frameless.
 814 
 815   // An i2c adapter is frameless because the *caller* frame, which is
 816   // interpreted, routinely repairs its own esp (from
 817   // interpreter_frame_last_sp), even if a callee has modified the
 818   // stack pointer.  It also recalculates and aligns sp.
 819 
 820   // A c2i adapter is frameless because the *callee* frame, which is
 821   // interpreted, routinely repairs its caller's sp (from sender_sp,
 822   // which is set up via the senderSP register).
 823 
 824   // In other words, if *either* the caller or callee is interpreted, we can

 844       range_check(masm, rax, r11,
 845                   Interpreter::code()->code_start(), Interpreter::code()->code_end(),
 846                   L_ok);
 847     if (StubRoutines::code1() != NULL)
 848       range_check(masm, rax, r11,
 849                   StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
 850                   L_ok);
 851     if (StubRoutines::code2() != NULL)
 852       range_check(masm, rax, r11,
 853                   StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
 854                   L_ok);
 855     const char* msg = "i2c adapter must return to an interpreter frame";
 856     __ block_comment(msg);
 857     __ stop(msg);
 858     __ bind(L_ok);
 859     __ block_comment("} verify_i2ce ");
 860 #endif
 861   }
 862 
 863   // Cut-out for having no stack args.
 864   int comp_words_on_stack = 0;
 865   if (comp_args_on_stack) {
 866      comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord;
 867      __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
 868      __ andr(sp, rscratch1, -16);
 869   }
 870 
 871   // Will jump to the compiled code just as if compiled code was doing it.
 872   // Pre-load the register-jump target early, to schedule it better.
 873   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_inline_offset())));
 874 
 875 #if INCLUDE_JVMCI
 876   if (EnableJVMCI) {
 877     // check if this call should be routed towards a specific entry point
 878     __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 879     Label no_alternative_target;
 880     __ cbz(rscratch2, no_alternative_target);
 881     __ mov(rscratch1, rscratch2);
 882     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 883     __ bind(no_alternative_target);
 884   }
 885 #endif // INCLUDE_JVMCI
 886 
 887   int total_args_passed = sig->length();
 888 
 889   // Now generate the shuffle code.
 890   for (int i = 0; i < total_args_passed; i++) {
 891     BasicType bt = sig->at(i)._bt;
 892 
 893     assert(bt != T_PRIMITIVE_OBJECT, "i2c adapter doesn't unpack inline type args");
 894     if (bt == T_VOID) {
 895       assert(i > 0 && (sig->at(i - 1)._bt == T_LONG || sig->at(i - 1)._bt == T_DOUBLE), "missing half");
 896       continue;
 897     }
 898 
 899     // Pick up 0, 1 or 2 words from SP+offset.
 900     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?");
 901 


 902     // Load in argument order going down.
 903     int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
 904     // Point to interpreter value (vs. tag)
 905     int next_off = ld_off - Interpreter::stackElementSize;
 906     //
 907     //
 908     //
 909     VMReg r_1 = regs[i].first();
 910     VMReg r_2 = regs[i].second();
 911     if (!r_1->is_valid()) {
 912       assert(!r_2->is_valid(), "");
 913       continue;
 914     }
 915     if (r_1->is_stack()) {
 916       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 917       int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size;
 918       if (!r_2->is_valid()) {
 919         // sign extend???
 920         __ ldrsw(rscratch2, Address(esp, ld_off));
 921         __ str(rscratch2, Address(sp, st_off));
 922       } else {
 923         //
 924         // We are using two optoregs. This can be either T_OBJECT,
 925         // T_ADDRESS, T_LONG, or T_DOUBLE. The interpreter allocates
 926         // two slots but only uses one for the T_LONG or T_DOUBLE case,
 927         // so we must adjust where to pick up the data to match the
 928         // interpreter.
 929         //
 930         // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
 931         // are accessed at negative offsets, so the LSW is at the lower address.
 932 
 933         // ld_off is MSW so get LSW
 934         const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;

 935         __ ldr(rscratch2, Address(esp, offset));
 936         // st_off is LSW (i.e. reg.first())
 937          __ str(rscratch2, Address(sp, st_off));
 938        }
 939      } else if (r_1->is_Register()) {  // Register argument
 940        Register r = r_1->as_Register();
 941        if (r_2->is_valid()) {
 942          //
 943          // We are using two VMRegs. This can be either T_OBJECT,
 944          // T_ADDRESS, T_LONG, or T_DOUBLE. The interpreter allocates
 945          // two slots but only uses one for the T_LONG or T_DOUBLE case,
 946          // so we must adjust where to pick up the data to match the
 947          // interpreter.
 948 
 949         const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
 950 
 951          // this can be a misaligned move
 952          __ ldr(r, Address(esp, offset));
 953        } else {
 954          // sign extend and use a full word?
 955          __ ldrw(r, Address(esp, ld_off));
 956        }
 957      } else {
 958        if (!r_2->is_valid()) {
 959          __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
 960        } else {
 961          __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
 962        }
 963      }
 964    }
 965 

















 966 
 967   // 6243940 We might end up in handle_wrong_method if
 968   // the callee is deoptimized as we race through here. If that
 969   // happens we don't want to take a safepoint because the
 970   // caller frame will look interpreted and arguments are now
 971   // "compiled" so it is much better to make this transition
 972   // invisible to the stack walking code. Unfortunately if
 973   // we try and find the callee by normal means a safepoint
 974   // is possible. So we stash the desired callee in the thread
 975   // and the VM will find it there should this case occur.
 976 
 977   __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));

 978   __ br(rscratch1);
 979 }
 980 
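      // Inline-cache check: a CompiledICHolder* arrives in rscratch2; verify that the
      // receiver's klass matches the holder's expected klass, load the target Method*
      // into rmethod, and jump to the IC-miss stub on a mismatch.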
 981 static void gen_inline_cache_check(MacroAssembler *masm, Label& skip_fixup) {












 982 
 983   Label ok;
 984 
 985   Register holder = rscratch2;
 986   Register receiver = j_rarg0;
 987   Register tmp = r10;  // A call-clobbered register not used for arg passing
 988 
 989   // -------------------------------------------------------------------------
 990   // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
 991   // to the interpreter.  The args start out packed in the compiled layout.  They
 992   // need to be unpacked into the interpreter layout.  This will almost always
 993   // require some stack space.  We grow the current (compiled) stack, then repack
 994   // the args.  We  finally end in a jump to the generic interpreter entry point.
 995   // the args.  We finally end in a jump to the generic interpreter entry point.
 996   // On exit from the interpreter, the interpreter will restore our SP (lest the
 997   // compiled code, which relies solely on SP and not FP, get sick).
 998   {
 999     __ block_comment("c2i_unverified_entry {");
1000     __ load_klass(rscratch1, receiver);
1001     __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
1002     __ cmp(rscratch1, tmp);
1003     __ ldr(rmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
1004     __ br(Assembler::EQ, ok);
1005     __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1006 
1007     __ bind(ok);
1008     // Method might have been compiled since the call site was patched to
1009     // interpreted; if that is the case treat it as a miss so we can get
1010     // the call site corrected.
1011     __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
1012     __ cbz(rscratch1, skip_fixup);
1013     __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1014     __ block_comment("} c2i_unverified_entry");
1015   }
1016 }
1017 
1018 
1019 // ---------------------------------------------------------------
1020 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,
1021                                                             int comp_args_on_stack,
1022                                                             const GrowableArray<SigEntry>* sig,
1023                                                             const VMRegPair* regs,
1024                                                             const GrowableArray<SigEntry>* sig_cc,
1025                                                             const VMRegPair* regs_cc,
1026                                                             const GrowableArray<SigEntry>* sig_cc_ro,
1027                                                             const VMRegPair* regs_cc_ro,
1028                                                             AdapterFingerPrint* fingerprint,
1029                                                             AdapterBlob*& new_adapter,
1030                                                             bool allocate_code_blob) {
1031 
1032   address i2c_entry = __ pc();
1033   gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);
1034 
1035   address c2i_unverified_entry = __ pc();
1036   Label skip_fixup;
1037 
1038   gen_inline_cache_check(masm, skip_fixup);
1039 
1040   OopMapSet* oop_maps = new OopMapSet();
1041   int frame_complete = CodeOffsets::frame_never_safe;
1042   int frame_size_in_words = 0;
1043 
1044   // Scalarized c2i adapter with non-scalarized receiver (i.e., don't pack receiver)
1045   address c2i_inline_ro_entry = __ pc();
1046   if (regs_cc != regs_cc_ro) {
1047     gen_c2i_adapter(masm, sig_cc_ro, regs_cc_ro, skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, false);
1048     skip_fixup.reset();
1049   }
1050 
1051   // Scalarized c2i adapter
1052   address c2i_entry = __ pc();
1053 
1054   // Class initialization barrier for static methods
1055   address c2i_no_clinit_check_entry = NULL;
1056   if (VM_Version::supports_fast_class_init_checks()) {
1057     Label L_skip_barrier;
1058 
1059     { // Bypass the barrier for non-static methods
1060       __ ldrw(rscratch1, Address(rmethod, Method::access_flags_offset()));
1061       __ andsw(zr, rscratch1, JVM_ACC_STATIC);
1062       __ br(Assembler::EQ, L_skip_barrier); // non-static
1063     }
1064 
1065     __ load_method_holder(rscratch2, rmethod);
1066     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
1067     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
1068 
1069     __ bind(L_skip_barrier);
1070     c2i_no_clinit_check_entry = __ pc();
1071   }
1072 
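       // Emit the collector-specific entry barrier for the c2i adapter
       // (may be a no-op depending on the GC in use).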
1073   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1074   bs->c2i_entry_barrier(masm);
1075 
1076   gen_c2i_adapter(masm, sig_cc, regs_cc, skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, true);
1077 
1078   address c2i_unverified_inline_entry = c2i_unverified_entry;
1079 
1080   // Non-scalarized c2i adapter
1081   address c2i_inline_entry = c2i_entry;
1082   if (regs != regs_cc) {
1083     Label inline_entry_skip_fixup;
1084     c2i_unverified_inline_entry = __ pc();
1085     gen_inline_cache_check(masm, inline_entry_skip_fixup);
1086 
1087     c2i_inline_entry = __ pc();
1088     gen_c2i_adapter(masm, sig, regs, inline_entry_skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, false);
1089   }
1090 
1091   __ flush();
1092 
1093   // The c2i adapter might safepoint and trigger a GC. The caller must make sure that
1094   // the GC knows about the location of oop argument locations passed to the c2i adapter.
1095   if (allocate_code_blob) {
1096     bool caller_must_gc_arguments = (regs != regs_cc);
1097     new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps, caller_must_gc_arguments);
1098   }
1099 
1100   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry, c2i_unverified_inline_entry, c2i_no_clinit_check_entry);
1101 }
1102 
1103 static int c_calling_convention_priv(const BasicType *sig_bt,
1104                                          VMRegPair *regs,
1105                                          VMRegPair *regs2,
1106                                          int total_args_passed) {
1107   assert(regs2 == NULL, "not needed on AArch64");
1108 
 1109 // We return the number of VMRegImpl stack slots we need to reserve for all
1110 // the arguments NOT counting out_preserve_stack_slots.
1111 
1112     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1113       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5,  c_rarg6,  c_rarg7
1114     };
1115     static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1116       c_farg0, c_farg1, c_farg2, c_farg3,
1117       c_farg4, c_farg5, c_farg6, c_farg7
1118     };
1119 
1120     uint int_args = 0;

1128       case T_BYTE:
1129       case T_SHORT:
1130       case T_INT:
1131         if (int_args < Argument::n_int_register_parameters_c) {
1132           regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
1133         } else {
1134 #ifdef __APPLE__
1135           // Less-than word types are stored one after another.
 1136           // The code is unable to handle this, so bail out.
1137           return -1;
1138 #endif
1139           regs[i].set1(VMRegImpl::stack2reg(stk_args));
1140           stk_args += 2;
1141         }
1142         break;
1143       case T_LONG:
1144         assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1145         // fall through
1146       case T_OBJECT:
1147       case T_ARRAY:
1148       case T_PRIMITIVE_OBJECT:
1149       case T_ADDRESS:
1150       case T_METADATA:
1151         if (int_args < Argument::n_int_register_parameters_c) {
1152           regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
1153         } else {
1154           regs[i].set2(VMRegImpl::stack2reg(stk_args));
1155           stk_args += 2;
1156         }
1157         break;
1158       case T_FLOAT:
1159         if (fp_args < Argument::n_float_register_parameters_c) {
1160           regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
1161         } else {
1162 #ifdef __APPLE__
1163           // Less-than word types are stored one after another.
 1164           // The code is unable to handle this, so bail out.
1165           return -1;
1166 #endif
1167           regs[i].set1(VMRegImpl::stack2reg(stk_args));
1168           stk_args += 2;

1803   int temploc = -1;
1804   for (int ai = 0; ai < arg_order.length(); ai += 2) {
1805     int i = arg_order.at(ai);
1806     int c_arg = arg_order.at(ai + 1);
1807     __ block_comment(err_msg("move %d -> %d", i, c_arg));
1808     assert(c_arg != -1 && i != -1, "wrong order");
1809 #ifdef ASSERT
1810     if (in_regs[i].first()->is_Register()) {
1811       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1812     } else if (in_regs[i].first()->is_FloatRegister()) {
1813       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1814     }
1815     if (out_regs[c_arg].first()->is_Register()) {
1816       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1817     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1818       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1819     }
1820 #endif /* ASSERT */
1821     switch (in_sig_bt[i]) {
1822       case T_ARRAY:
1823       case T_PRIMITIVE_OBJECT:
1824       case T_OBJECT:
1825         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1826                     ((i == 0) && (!is_static)),
1827                     &receiver_offset);
1828         int_args++;
1829         break;
1830       case T_VOID:
1831         break;
1832 
1833       case T_FLOAT:
1834         float_move(masm, in_regs[i], out_regs[c_arg]);
1835         float_args++;
1836         break;
1837 
1838       case T_DOUBLE:
1839         assert( i + 1 < total_in_args &&
1840                 in_sig_bt[i + 1] == T_VOID &&
1841                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1842         double_move(masm, in_regs[i], out_regs[c_arg]);
1843         float_args++;

1919   Label lock_done;
1920 
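       // Synchronized native method: acquire the lock on the object referenced by the
       // incoming handle (the receiver, or the class mirror for a static method) using
       // the BasicLock slot reserved in this frame.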
1921   if (method->is_synchronized()) {
1922 
1923     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1924 
1925     // Get the handle (the 2nd argument)
1926     __ mov(oop_handle_reg, c_rarg1);
1927 
1928     // Get address of the box
1929 
1930     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1931 
1932     // Load the oop from the handle
1933     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1934 
1935     if (!UseHeavyMonitors) {
1936       // Load (object->mark() | 1) into swap_reg %r0
1937       __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1938       __ orr(swap_reg, rscratch1, 1);
1939       if (EnableValhalla) {
1940         // Mask inline_type bit such that we go to the slow path if object is an inline type
1941         __ andr(swap_reg, swap_reg, ~((int) markWord::inline_type_bit_in_place));
1942       }
1943 
1944       // Save (object->mark() | 1) into BasicLock's displaced header
1945       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1946 
1947       // src -> dest iff dest == r0 else r0 <- dest
1948       { Label here;
1949         __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
1950       }
1951 
1952       // Hmm should this move to the slow path code area???
1953 
1954       // Test if the oopMark is an obvious stack pointer, i.e.,
1955       //  1) (mark & 3) == 0, and
1956       //  2) sp <= mark < mark + os::pagesize()
1957       // These 3 tests can be done by evaluating the following
1958       // expression: ((mark - sp) & (3 - os::vm_page_size())),
1959       // assuming both stack pointer and pagesize have their
1960       // least significant 2 bits clear.
1961       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1962 

1988 
1989   rt_call(masm, native_func);
1990 
1991   __ bind(native_return);
1992 
1993   intptr_t return_pc = (intptr_t) __ pc();
1994   oop_maps->add_gc_map(return_pc - start, map);
1995 
1996   // Unpack native results.
1997   switch (ret_type) {
1998   case T_BOOLEAN: __ c2bool(r0);                     break;
1999   case T_CHAR   : __ ubfx(r0, r0, 0, 16);            break;
2000   case T_BYTE   : __ sbfx(r0, r0, 0, 8);             break;
2001   case T_SHORT  : __ sbfx(r0, r0, 0, 16);            break;
2002   case T_INT    : __ sbfx(r0, r0, 0, 32);            break;
2003   case T_DOUBLE :
2004   case T_FLOAT  :
 2005     // Result is in v0; we'll save as needed
2006     break;
2007   case T_ARRAY:                 // Really a handle
2008   case T_PRIMITIVE_OBJECT:           // Really a handle
2009   case T_OBJECT:                // Really a handle
2010       break; // can't de-handlize until after safepoint check
2011   case T_VOID: break;
2012   case T_LONG: break;
2013   default       : ShouldNotReachHere();
2014   }
2015 
2016   Label safepoint_in_progress, safepoint_in_progress_done;
2017   Label after_transition;
2018 
2019   // Switch thread to "native transition" state before reading the synchronization state.
2020   // This additional state is necessary because reading and testing the synchronization
2021   // state is not atomic w.r.t. GC, as this scenario demonstrates:
2022   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2023   //     VM thread changes sync state to synchronizing and suspends threads for GC.
2024   //     Thread A is resumed to finish this native method, but doesn't block here since it
 2025   //     didn't see any synchronization in progress, and escapes.
2026   __ mov(rscratch1, _thread_in_native_trans);
2027 
2028   __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));

3231 #ifdef ASSERT
3232   __ str(zr, Address(rthread, JavaThread::exception_handler_pc_offset()));
3233   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
3234 #endif
3235   // Clear the exception oop so GC no longer processes it as a root.
3236   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
3237 
3238   // r0: exception oop
3239   // r8:  exception handler
3240   // r4: exception pc
3241   // Jump to handler
3242 
3243   __ br(r8);
3244 
3245   // Make sure all code is generated
3246   masm->flush();
3247 
3248   // Set exception blob
3249   _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3250 }
3251 #endif // COMPILER2
3252 
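      // Adapters for scalarized inline-type (value class) returns: pack_fields stores the
      // field registers into a pre-allocated buffered instance (passed in via a JNI handle),
      // and unpack_fields loads the fields of a buffered instance back into registers.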
3253 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
3254   BufferBlob* buf = BufferBlob::create("inline types pack/unpack", 16 * K);
3255   CodeBuffer buffer(buf);
3256   short buffer_locs[20];
3257   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3258                                          sizeof(buffer_locs)/sizeof(relocInfo));
3259 
3260   MacroAssembler _masm(&buffer);
3261   MacroAssembler* masm = &_masm;
3262 
3263   const Array<SigEntry>* sig_vk = vk->extended_sig();
3264   const Array<VMRegPair>* regs = vk->return_regs();
3265 
3266   int pack_fields_jobject_off = __ offset();
3267   // Resolve pre-allocated buffer from JNI handle.
3268   // We cannot do this in generate_call_stub() because it requires GC code to be initialized.
3269   Register Rresult = r14;  // See StubGenerator::generate_call_stub().
3270   __ ldr(r0, Address(Rresult));
3271   __ resolve_jobject(r0 /* value */,
3272                      rthread /* thread */,
3273                      r12 /* tmp */);
3274   __ str(r0, Address(Rresult));
3275 
3276   int pack_fields_off = __ offset();
3277 
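      // The first entry of return_regs() describes the value object itself (returned in r0);
      // the scalarized field values start at index 1, hence j starts at 1.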
3278   int j = 1;
3279   for (int i = 0; i < sig_vk->length(); i++) {
3280     BasicType bt = sig_vk->at(i)._bt;
3281     if (bt == T_PRIMITIVE_OBJECT) {
3282       continue;
3283     }
3284     if (bt == T_VOID) {
3285       if (sig_vk->at(i-1)._bt == T_LONG ||
3286           sig_vk->at(i-1)._bt == T_DOUBLE) {
3287         j++;
3288       }
3289       continue;
3290     }
3291     int off = sig_vk->at(i)._offset;
3292     VMRegPair pair = regs->at(j);
3293     VMReg r_1 = pair.first();
3294     VMReg r_2 = pair.second();
3295     Address to(r0, off);
3296     if (bt == T_FLOAT) {
3297       __ strs(r_1->as_FloatRegister(), to);
3298     } else if (bt == T_DOUBLE) {
3299       __ strd(r_1->as_FloatRegister(), to);
3300     } else if (bt == T_OBJECT || bt == T_ARRAY) {
3301       Register val = r_1->as_Register();
3302       assert_different_registers(r0, val);
3303       // We don't need barriers because the destination is a newly allocated object.
3304       // Also, we cannot use store_heap_oop(to, val) because it uses r8 as tmp.
3305       if (UseCompressedOops) {
3306         __ encode_heap_oop(val);
 3307         __ strw(val, to);
3308       } else {
3309         __ str(val, to);
3310       }
3311     } else {
3312       assert(is_java_primitive(bt), "unexpected basic type");
3313       assert_different_registers(r0, r_1->as_Register());
3314       size_t size_in_bytes = type2aelembytes(bt);
3315       __ store_sized_value(to, r_1->as_Register(), size_in_bytes);
3316     }
3317     j++;
3318   }
3319   assert(j == regs->length(), "missed a field?");
3320 
3321   __ ret(lr);
3322 
3323   int unpack_fields_off = __ offset();
3324 
3325   Label skip;
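      // Nothing to unpack if the incoming buffered value (in r0) is null.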
3326   __ cbz(r0, skip);
3327 
3328   j = 1;
3329   for (int i = 0; i < sig_vk->length(); i++) {
3330     BasicType bt = sig_vk->at(i)._bt;
3331     if (bt == T_PRIMITIVE_OBJECT) {
3332       continue;
3333     }
3334     if (bt == T_VOID) {
3335       if (sig_vk->at(i-1)._bt == T_LONG ||
3336           sig_vk->at(i-1)._bt == T_DOUBLE) {
3337         j++;
3338       }
3339       continue;
3340     }
3341     int off = sig_vk->at(i)._offset;
3342     assert(off > 0, "offset in object should be positive");
3343     VMRegPair pair = regs->at(j);
3344     VMReg r_1 = pair.first();
3345     VMReg r_2 = pair.second();
3346     Address from(r0, off);
3347     if (bt == T_FLOAT) {
3348       __ ldrs(r_1->as_FloatRegister(), from);
3349     } else if (bt == T_DOUBLE) {
3350       __ ldrd(r_1->as_FloatRegister(), from);
3351     } else if (bt == T_OBJECT || bt == T_ARRAY) {
3352       assert_different_registers(r0, r_1->as_Register());
3353       __ load_heap_oop(r_1->as_Register(), from);
3354     } else {
3355       assert(is_java_primitive(bt), "unexpected basic type");
3356       assert_different_registers(r0, r_1->as_Register());
3357 
3358       size_t size_in_bytes = type2aelembytes(bt);
3359       __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
3360     }
3361     j++;
3362   }
3363   assert(j == regs->length(), "missed a field?");
3364 
3365   __ bind(skip);
3366 
3367   __ ret(lr);
3368 
3369   __ flush();
3370 
3371   return BufferedInlineTypeBlob::create(&buffer, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
3372 }
3373 
3374 // ---------------------------------------------------------------
3375 
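      // Generates a stub that calls the native function at _call_target, with arguments
      // already placed in _input_registers and results described by _output_registers;
      // records the frame size and an OopMapSet so the stub's frame can be walked.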
3376 class NativeInvokerGenerator : public StubCodeGenerator {
3377   address _call_target;
3378   int _shadow_space_bytes;
3379 
3380   const GrowableArray<VMReg>& _input_registers;
3381   const GrowableArray<VMReg>& _output_registers;
3382 
3383   int _frame_complete;
3384   int _framesize;
3385   OopMapSet* _oop_maps;
3386 public:
3387   NativeInvokerGenerator(CodeBuffer* buffer,
3388                          address call_target,
3389                          int shadow_space_bytes,
3390                          const GrowableArray<VMReg>& input_registers,
3391                          const GrowableArray<VMReg>& output_registers)
3392    : StubCodeGenerator(buffer, PrintMethodHandleStubs),

3605 
3606   //////////////////////////////////////////////////////////////////////////////
3607 
3608   __ block_comment("{ L_reguard");
3609   __ bind(L_reguard);
3610 
3611   spill_output_registers();
3612 
3613   rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
3614 
3615   fill_output_registers();
3616 
3617   __ b(L_after_reguard);
3618 
3619   __ block_comment("} L_reguard");
3620 
3621   //////////////////////////////////////////////////////////////////////////////
3622 
3623   __ flush();
3624 }
