
src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp


  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "asm/macroAssembler.hpp"
  29 #include "asm/macroAssembler.inline.hpp"

  30 #include "code/codeCache.hpp"
  31 #include "code/compiledIC.hpp"
  32 #include "code/debugInfoRec.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/oopMap.hpp"
  36 #include "gc/shared/barrierSetAssembler.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "interpreter/interp_masm.hpp"
  39 #include "logging/log.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "nativeInst_aarch64.hpp"
  42 #include "oops/compiledICHolder.hpp"
  43 #include "oops/klass.inline.hpp"
  44 #include "oops/method.inline.hpp"
  45 #include "prims/methodHandles.hpp"
  46 #include "runtime/continuation.hpp"
  47 #include "runtime/continuationEntry.inline.hpp"
  48 #include "runtime/globals.hpp"
  49 #include "runtime/jniHandles.hpp"

 320     case T_SHORT:
 321     case T_INT:
 322       if (int_args < Argument::n_int_register_parameters_j) {
 323         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
 324       } else {
 325         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 326         stk_args += 2;
 327       }
 328       break;
 329     case T_VOID:
 330       // halves of T_LONG or T_DOUBLE
 331       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 332       regs[i].set_bad();
 333       break;
 334     case T_LONG:
 335       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 336       // fall through
 337     case T_OBJECT:
 338     case T_ARRAY:
 339     case T_ADDRESS:

 340       if (int_args < Argument::n_int_register_parameters_j) {
 341         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
 342       } else {
 343         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 344         stk_args += 2;
 345       }
 346       break;
 347     case T_FLOAT:
 348       if (fp_args < Argument::n_float_register_parameters_j) {
 349         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
 350       } else {
 351         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 352         stk_args += 2;
 353       }
 354       break;
 355     case T_DOUBLE:
 356       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 357       if (fp_args < Argument::n_float_register_parameters_j) {
 358         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 359       } else {
 360         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 361         stk_args += 2;
 362       }
 363       break;
 364     default:
 365       ShouldNotReachHere();
 366       break;
 367     }
 368   }
 369 
 370   return align_up(stk_args, 2);
 371 }
 372 
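A quick illustration of the assignment logic above (a sketch only; it assumes enough free argument registers so nothing spills to the stack, and uses the INT_ArgReg/FP_ArgReg tables the switch refers to):

// Java signature (long, int, float, Object) expands to
//   sig_bt = T_LONG, T_VOID, T_INT, T_FLOAT, T_OBJECT
// and is assigned as follows:
//   i  sig_bt[i]  assignment
//   0  T_LONG     INT_ArgReg[0]  (set2)
//   1  T_VOID     set_bad()      (placeholder for the long's second half)
//   2  T_INT      INT_ArgReg[1]  (set1)
//   3  T_FLOAT    FP_ArgReg[0]   (set1)
//   4  T_OBJECT   INT_ArgReg[2]  (set2)
// Once int_args (or fp_args) reaches its register limit, the argument gets
// VMRegImpl::stack2reg(stk_args) instead and stk_args advances by two slots.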
 373 // Patch the caller's callsite with entry to compiled code if it exists.
 374 static void patch_callers_callsite(MacroAssembler *masm) {
 375   Label L;
 376   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 377   __ cbz(rscratch1, L);
 378 
 379   __ enter();
 380   __ push_CPU_state();
 381 
 382   // VM needs caller's callsite
 383   // VM needs target method
 384   // This needs to be a long call since we will relocate this adapter to
 385   // the codeBuffer and it may not reach
 386 
 387 #ifndef PRODUCT
 388   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
 389 #endif
 390 
 391   __ mov(c_rarg0, rmethod);
 392   __ mov(c_rarg1, lr);
 393   __ authenticate_return_address(c_rarg1, rscratch1);
 394   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 395   __ blr(rscratch1);
 396 
 397   // Explicit isb required because fixup_callers_callsite may change the code
 398   // stream.
 399   __ safepoint_isb();
 400 
 401   __ pop_CPU_state();
 402   // restore sp
 403   __ leave();
 404   __ bind(L);
 405 }
 406 
 407 static void gen_c2i_adapter(MacroAssembler *masm,
 408                             int total_args_passed,
 409                             int comp_args_on_stack,
 410                             const BasicType *sig_bt,
 411                             const VMRegPair *regs,
 412                             Label& skip_fixup) {
 413   // Before we get into the guts of the C2I adapter, see if we should be here
 414   // at all.  We've come from compiled code and are attempting to jump to the
 415   // interpreter, which means the caller made a static call to get here
 416   // (vcalls always get a compiled target if there is one).  Check for a
 417   // compiled target.  If there is one, we need to patch the caller's call.
 418   patch_callers_callsite(masm);
 419 
 420   __ bind(skip_fixup);
 421 
 422   int words_pushed = 0;
 423 
 424   // Since all args are passed on the stack, total_args_passed *
 425   // Interpreter::stackElementSize is the space we need.
 426 
 427   int extraspace = total_args_passed * Interpreter::stackElementSize;

 428 
 429   __ mov(r19_sender_sp, sp);


 430 
 431   // stack is aligned, keep it that way
 432   extraspace = align_up(extraspace, 2*wordSize);

 433 
 434   if (extraspace)
 435     __ sub(sp, sp, extraspace);
 436 
 437   // Now write the args into the outgoing interpreter space
 438   for (int i = 0; i < total_args_passed; i++) {
 439     if (sig_bt[i] == T_VOID) {
 440       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 441       continue;
 442     }
 443 
 444     // offset to start parameters
 445     int st_off   = (total_args_passed - i - 1) * Interpreter::stackElementSize;
 446     int next_off = st_off - Interpreter::stackElementSize;
 447 
 448     // Say 4 args:
 449     // i   st_off
 450     // 0   32 T_LONG
 451     // 1   24 T_VOID
 452     // 2   16 T_OBJECT
 453     // 3    8 T_BOOL
 454     // -    0 return address
 455     //
 456     // However, to make things extra confusing: because we can fit a Java long/double in
 457     // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
 458     // leaves one slot empty and only stores to a single slot. In this case the
 459     // slot that is occupied is the T_VOID slot. See, I said it was confusing.
 460 
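    // Worked example for the offsets below (a sketch, assuming
    // Interpreter::stackElementSize == wordSize == 8): with
    // total_args_passed == 4, argument i == 0 gets
    //   st_off   = (4 - 0 - 1) * 8 == 24
    //   next_off = st_off - 8     == 16
    // so a T_LONG/T_DOUBLE is written once, at sp + 16 (the T_VOID slot),
    // while debug builds overwrite sp + 24 with junk as the stores below show.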
 461     VMReg r_1 = regs[i].first();
 462     VMReg r_2 = regs[i].second();
 463     if (!r_1->is_valid()) {
 464       assert(!r_2->is_valid(), "");
 465       continue;
 466     }
 467     if (r_1->is_stack()) {
 468       // memory to memory use rscratch1
 469       int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
 470                     + extraspace
 471                     + words_pushed * wordSize);
 472       if (!r_2->is_valid()) {
 473         // sign extend??
 474         __ ldrw(rscratch1, Address(sp, ld_off));
 475         __ str(rscratch1, Address(sp, st_off));
 476 
 477       } else {
 478 
 479         __ ldr(rscratch1, Address(sp, ld_off));

 480 
 481         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 482         // T_DOUBLE and T_LONG use two slots in the interpreter
 483         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 484           // ld_off == LSW, ld_off+wordSize == MSW
 485           // st_off == MSW, next_off == LSW
 486           __ str(rscratch1, Address(sp, next_off));
 487 #ifdef ASSERT
 488           // Overwrite the unused slot with known junk
 489           __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaaaull);
 490           __ str(rscratch1, Address(sp, st_off));
 491 #endif /* ASSERT */
 492         } else {
 493           __ str(rscratch1, Address(sp, st_off));
 494         }
 495       }
 496     } else if (r_1->is_Register()) {
 497       Register r = r_1->as_Register();
 498       if (!r_2->is_valid()) {
 499         // must be only an int (or less) so move only 32 bits to the slot
 500         // why not sign extend??
 501         __ str(r, Address(sp, st_off));
 502       } else {
 503         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
 504         // T_DOUBLE and T_LONG use two slots in the interpreter
 505         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
 506           // jlong/double in gpr
 507 #ifdef ASSERT
 508           // Overwrite the unused slot with known junk
 509           __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaabull);
 510           __ str(rscratch1, Address(sp, st_off));
 511 #endif /* ASSERT */
 512           __ str(r, Address(sp, next_off));
 513         } else {
 514           __ str(r, Address(sp, st_off));
 515         }
 516       }
 517     } else {
 518       assert(r_1->is_FloatRegister(), "");
 519       if (!r_2->is_valid()) {
 520         // only a float use just part of the slot
 521         __ strs(r_1->as_FloatRegister(), Address(sp, st_off));
 522       } else {
 523 #ifdef ASSERT
 524         // Overwrite the unused slot with known junk
 525         __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaacull);
 526         __ str(rscratch1, Address(sp, st_off));
 527 #endif /* ASSERT */
 528         __ strd(r_1->as_FloatRegister(), Address(sp, next_off));
 529       }
 530     }
 531   }
 532 
 533   __ mov(esp, sp); // Interp expects args on caller's expression stack
 534 
 535   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
 536   __ br(rscratch1);
 537 }
 538 

 539 
 540 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
 541                                     int total_args_passed,
 542                                     int comp_args_on_stack,
 543                                     const BasicType *sig_bt,
 544                                     const VMRegPair *regs) {
 545 
 546   // Note: r19_sender_sp contains the senderSP on entry. We must
 547   // preserve it since we may do a i2c -> c2i transition if we lose a
 548   // race where compiled code goes non-entrant while we get args
 549   // ready.
 550 
 551   // Adapters are frameless.
 552 
 553   // An i2c adapter is frameless because the *caller* frame, which is
 554   // interpreted, routinely repairs its own esp (from
 555   // interpreter_frame_last_sp), even if a callee has modified the
 556   // stack pointer.  It also recalculates and aligns sp.
 557 
 558   // A c2i adapter is frameless because the *callee* frame, which is
 559   // interpreted, routinely repairs its caller's sp (from sender_sp,
 560   // which is set up via the senderSP register).
 561 
 562   // In other words, if *either* the caller or callee is interpreted, we can
 563   // get the stack pointer repaired after a call.
 564 

 582       range_check(masm, rax, r11,
 583                   Interpreter::code()->code_start(), Interpreter::code()->code_end(),
 584                   L_ok);
 585     if (StubRoutines::code1() != NULL)
 586       range_check(masm, rax, r11,
 587                   StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
 588                   L_ok);
 589     if (StubRoutines::code2() != NULL)
 590       range_check(masm, rax, r11,
 591                   StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
 592                   L_ok);
 593     const char* msg = "i2c adapter must return to an interpreter frame";
 594     __ block_comment(msg);
 595     __ stop(msg);
 596     __ bind(L_ok);
 597     __ block_comment("} verify_i2ce ");
 598 #endif
 599   }
 600 
 601   // Cut-out for having no stack args.
 602   int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
 603   if (comp_args_on_stack) {
 604     __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
 605     __ andr(sp, rscratch1, -16);

 606   }
 607 
 608   // Will jump to the compiled code just as if compiled code was doing it.
 609   // Pre-load the register-jump target early, to schedule it better.
 610   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));
 611 
 612 #if INCLUDE_JVMCI
 613   if (EnableJVMCI) {
 614     // check if this call should be routed towards a specific entry point
 615     __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 616     Label no_alternative_target;
 617     __ cbz(rscratch2, no_alternative_target);
 618     __ mov(rscratch1, rscratch2);
 619     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 620     __ bind(no_alternative_target);
 621   }
 622 #endif // INCLUDE_JVMCI
 623 


 624   // Now generate the shuffle code.
 625   for (int i = 0; i < total_args_passed; i++) {
 626     if (sig_bt[i] == T_VOID) {
 627       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
 628       continue;
 629     }
 630 
 631     // Pick up 0, 1 or 2 words from SP+offset.

 632 
 633     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
 634             "scrambled load targets?");
 635     // Load in argument order going down.
 636     int ld_off = (total_args_passed - i - 1)*Interpreter::stackElementSize;
 637     // Point to interpreter value (vs. tag)
 638     int next_off = ld_off - Interpreter::stackElementSize;
 639     //
 640     //
 641     //
 642     VMReg r_1 = regs[i].first();
 643     VMReg r_2 = regs[i].second();
 644     if (!r_1->is_valid()) {
 645       assert(!r_2->is_valid(), "");
 646       continue;
 647     }
 648     if (r_1->is_stack()) {
 649       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 650       int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
 651       if (!r_2->is_valid()) {
 652         // sign extend???
 653         __ ldrsw(rscratch2, Address(esp, ld_off));
 654         __ str(rscratch2, Address(sp, st_off));
 655       } else {
 656         //
 657         // We are using two optoregs. This can be either T_OBJECT,
 658         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
 659         // two slots but only uses one for the T_LONG or T_DOUBLE case,
 660         // so we must adjust where to pick up the data to match the
 661         // interpreter.
 662         //
 663         // Interpreter local[n] == MSW, local[n+1] == LSW however locals
 664         // are accessed as negative so LSW is at LOW address
 665 
 666         // ld_off is MSW so get LSW
 667         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 668                            next_off : ld_off;
 669         __ ldr(rscratch2, Address(esp, offset));
 670         // st_off is LSW (i.e. reg.first())
 671         __ str(rscratch2, Address(sp, st_off));
 672       }
 673     } else if (r_1->is_Register()) {  // Register argument
 674       Register r = r_1->as_Register();
 675       if (r_2->is_valid()) {
 676         //
 677         // We are using two VMRegs. This can be either T_OBJECT,
 678         // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
 679         // two slots but only uses one for the T_LONG or T_DOUBLE case,
 680         // so we must adjust where to pick up the data to match the
 681         // interpreter.
 682 
 683         const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
 684                            next_off : ld_off;
 685 
 686         // this can be a misaligned move
 687         __ ldr(r, Address(esp, offset));
 688       } else {
 689         // sign extend and use a full word?
 690         __ ldrw(r, Address(esp, ld_off));
 691       }
 692     } else {
 693       if (!r_2->is_valid()) {
 694         __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
 695       } else {
 696         __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
 697       }
 698     }
 699   }
 700 
 701   __ mov(rscratch2, rscratch1);
 702   __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
 703   __ mov(rscratch1, rscratch2);
 704 
 705   // 6243940 We might end up in handle_wrong_method if
 706   // the callee is deoptimized as we race thru here. If that
 707   // happens we don't want to take a safepoint because the
 708   // caller frame will look interpreted and arguments are now
 709   // "compiled" so it is much better to make this transition
 710   // invisible to the stack walking code. Unfortunately if
 711   // we try and find the callee by normal means a safepoint
 712   // is possible. So we stash the desired callee in the thread
 713   // and the VM will find it there should this case occur.
 714 
 715   __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
 716 
 717   __ br(rscratch1);
 718 }
 719 
 720 // ---------------------------------------------------------------
 721 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 722                                                             int total_args_passed,
 723                                                             int comp_args_on_stack,
 724                                                             const BasicType *sig_bt,
 725                                                             const VMRegPair *regs,
 726                                                             AdapterFingerPrint* fingerprint) {
 727   address i2c_entry = __ pc();
 728 
 729   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
 730 
 731   address c2i_unverified_entry = __ pc();
 732   Label skip_fixup;
 733 
 734   Label ok;
 735 
 736   Register holder = rscratch2;
 737   Register receiver = j_rarg0;
 738   Register tmp = r10;  // A call-clobbered register not used for arg passing
 739 
 740   // -------------------------------------------------------------------------
 741   // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
 742   // to the interpreter.  The args start out packed in the compiled layout.  They
 743   // need to be unpacked into the interpreter layout.  This will almost always
 744   // require some stack space.  We grow the current (compiled) stack, then repack
 745   // the args.  We  finally end in a jump to the generic interpreter entry point.
 746   // On exit from the interpreter, the interpreter will restore our SP (lest the
 747   // compiled code, which relies solely on SP and not FP, get sick).
 748 
 749   {
 750     __ block_comment("c2i_unverified_entry {");
 751     __ load_klass(rscratch1, receiver);
 752     __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
 753     __ cmp(rscratch1, tmp);
 754     __ ldr(rmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
 755     __ br(Assembler::EQ, ok);
 756     __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 757 
 758     __ bind(ok);
 759     // Method might have been compiled since the call site was patched to
 760     // interpreted; if that is the case treat it as a miss so we can get
 761     // the call site corrected.
 762     __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 763     __ cbz(rscratch1, skip_fixup);
 764     __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 765     __ block_comment("} c2i_unverified_entry");
 766   }

 767 
 768   address c2i_entry = __ pc();
 769 
 770   // Class initialization barrier for static methods
 771   address c2i_no_clinit_check_entry = NULL;
 772   if (VM_Version::supports_fast_class_init_checks()) {
 773     Label L_skip_barrier;
 774 
 775     { // Bypass the barrier for non-static methods
 776       __ ldrw(rscratch1, Address(rmethod, Method::access_flags_offset()));
 777       __ andsw(zr, rscratch1, JVM_ACC_STATIC);
 778       __ br(Assembler::EQ, L_skip_barrier); // non-static
 779     }
 780 
 781     __ load_method_holder(rscratch2, rmethod);
 782     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
 783     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 784 
 785     __ bind(L_skip_barrier);
 786     c2i_no_clinit_check_entry = __ pc();
 787   }
 788 
 789   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 790   bs->c2i_entry_barrier(masm);

 791 
 792   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 793 
 794   __ flush();
 795   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
 796 }
 797 
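The adapter blob assembled above ends up with four entry points; the following summary is a sketch pieced together from the code in this function:

// i2c_entry                 - interpreted caller -> compiled callee
//                             (gen_i2c_adapter above)
// c2i_unverified_entry      - compiled caller arriving through an inline cache;
//                             a receiver klass mismatch (or a method that has
//                             been compiled since the call site was patched)
//                             jumps to the ic_miss stub, otherwise control
//                             continues at skip_fixup inside the c2i code
// c2i_entry                 - verified compiled caller -> interpreted callee
//                             (gen_c2i_adapter)
// c2i_no_clinit_check_entry - like c2i_entry but past the class initialization
//                             barrier; only set when fast class init checks
//                             are supported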
 798 static int c_calling_convention_priv(const BasicType *sig_bt,
 799                                          VMRegPair *regs,
 800                                          VMRegPair *regs2,
 801                                          int total_args_passed) {
 802   assert(regs2 == NULL, "not needed on AArch64");
 803 
 804 // We return the amount of VMRegImpl stack slots we need to reserve for all
 805 // the arguments NOT counting out_preserve_stack_slots.
 806 
 807     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
 808       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5,  c_rarg6,  c_rarg7
 809     };
 810     static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
 811       c_farg0, c_farg1, c_farg2, c_farg3,
 812       c_farg4, c_farg5, c_farg6, c_farg7
 813     };
 814 
 815     uint int_args = 0;

 823       case T_BYTE:
 824       case T_SHORT:
 825       case T_INT:
 826         if (int_args < Argument::n_int_register_parameters_c) {
 827           regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
 828         } else {
 829 #ifdef __APPLE__
 830           // Less-than word types are stored one after another.
 831           // The code is unable to handle this so bailout.
 832           return -1;
 833 #endif
 834           regs[i].set1(VMRegImpl::stack2reg(stk_args));
 835           stk_args += 2;
 836         }
 837         break;
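        // Background for the __APPLE__ bailout above: Apple's AArch64 ABI packs
        // stack arguments to their natural size and alignment rather than
        // widening each one to an 8-byte slot, so e.g. a ninth int argument
        // would occupy only 4 bytes of stack. The two-slot VMRegPair
        // bookkeeping used here cannot describe that layout, hence the -1.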
 838       case T_LONG:
 839         assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 840         // fall through
 841       case T_OBJECT:
 842       case T_ARRAY:

 843       case T_ADDRESS:
 844       case T_METADATA:
 845         if (int_args < Argument::n_int_register_parameters_c) {
 846           regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
 847         } else {
 848           regs[i].set2(VMRegImpl::stack2reg(stk_args));
 849           stk_args += 2;
 850         }
 851         break;
 852       case T_FLOAT:
 853         if (fp_args < Argument::n_float_register_parameters_c) {
 854           regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
 855         } else {
 856 #ifdef __APPLE__
 857           // Less-than word types are stored one after another.
 858           // The code is unable to handle this so bailout.
 859           return -1;
 860 #endif
 861           regs[i].set1(VMRegImpl::stack2reg(stk_args));
 862           stk_args += 2;

1638   int temploc = -1;
1639   for (int ai = 0; ai < arg_order.length(); ai += 2) {
1640     int i = arg_order.at(ai);
1641     int c_arg = arg_order.at(ai + 1);
1642     __ block_comment(err_msg("move %d -> %d", i, c_arg));
1643     assert(c_arg != -1 && i != -1, "wrong order");
1644 #ifdef ASSERT
1645     if (in_regs[i].first()->is_Register()) {
1646       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1647     } else if (in_regs[i].first()->is_FloatRegister()) {
1648       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1649     }
1650     if (out_regs[c_arg].first()->is_Register()) {
1651       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1652     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1653       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1654     }
1655 #endif /* ASSERT */
1656     switch (in_sig_bt[i]) {
1657       case T_ARRAY:

1658       case T_OBJECT:
1659         __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1660                        ((i == 0) && (!is_static)),
1661                        &receiver_offset);
1662         int_args++;
1663         break;
1664       case T_VOID:
1665         break;
1666 
1667       case T_FLOAT:
1668         __ float_move(in_regs[i], out_regs[c_arg]);
1669         float_args++;
1670         break;
1671 
1672       case T_DOUBLE:
1673         assert( i + 1 < total_in_args &&
1674                 in_sig_bt[i + 1] == T_VOID &&
1675                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1676         __ double_move(in_regs[i], out_regs[c_arg]);
1677         float_args++;

1752   Label lock_done;
1753 
1754   if (method->is_synchronized()) {
1755     Label count;
1756     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1757 
1758     // Get the handle (the 2nd argument)
1759     __ mov(oop_handle_reg, c_rarg1);
1760 
1761     // Get address of the box
1762 
1763     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1764 
1765     // Load the oop from the handle
1766     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1767 
1768     if (!UseHeavyMonitors) {
1769       // Load (object->mark() | 1) into swap_reg %r0
1770       __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1771       __ orr(swap_reg, rscratch1, 1);
1772 
1773       // Save (object->mark() | 1) into BasicLock's displaced header
1774       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1775 
1776       // src -> dest iff dest == r0 else r0 <- dest
1777       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/NULL);
1778 
1779       // Hmm should this move to the slow path code area???
1780 
1781       // Test if the oopMark is an obvious stack pointer, i.e.,
1782       //  1) (mark & 3) == 0, and
 1783       //  2) sp <= mark < sp + os::pagesize()
1784       // These 3 tests can be done by evaluating the following
1785       // expression: ((mark - sp) & (3 - os::vm_page_size())),
1786       // assuming both stack pointer and pagesize have their
1787       // least significant 2 bits clear.
1788       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1789 
1790       __ sub(swap_reg, sp, swap_reg);
1791       __ neg(swap_reg, swap_reg);
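      // Worked arithmetic for the test above (a sketch, assuming a 4 KiB page,
      // so 3 - os::vm_page_size() == 0xfffffffffffff003): after the sub/neg
      // pair, swap_reg == mark - sp. If the displaced mark is a stack address
      // in [sp, sp + page_size) with its low two bits clear, then
      // (mark - sp) & (3 - page_size) == 0; any other mark value leaves
      // nonzero bits set and sends us to the slow path.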

1817 
1818   __ rt_call(native_func);
1819 
1820   __ bind(native_return);
1821 
1822   intptr_t return_pc = (intptr_t) __ pc();
1823   oop_maps->add_gc_map(return_pc - start, map);
1824 
1825   // Unpack native results.
1826   switch (ret_type) {
1827   case T_BOOLEAN: __ c2bool(r0);                     break;
1828   case T_CHAR   : __ ubfx(r0, r0, 0, 16);            break;
1829   case T_BYTE   : __ sbfx(r0, r0, 0, 8);             break;
1830   case T_SHORT  : __ sbfx(r0, r0, 0, 16);            break;
1831   case T_INT    : __ sbfx(r0, r0, 0, 32);            break;
1832   case T_DOUBLE :
1833   case T_FLOAT  :
1834     // Result is in v0 we'll save as needed
1835     break;
1836   case T_ARRAY:                 // Really a handle

1837   case T_OBJECT:                // Really a handle
1838       break; // can't de-handlize until after safepoint check
1839   case T_VOID: break;
1840   case T_LONG: break;
1841   default       : ShouldNotReachHere();
1842   }
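  // Note on the extracts above: ubfx zero-extends (Java char is an unsigned
  // 16-bit type) while sbfx sign-extends, so byte, short and int results are
  // widened to proper 64-bit register values before being handed back to Java.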
1843 
1844   Label safepoint_in_progress, safepoint_in_progress_done;
1845   Label after_transition;
1846 
1847   // Switch thread to "native transition" state before reading the synchronization state.
1848   // This additional state is necessary because reading and testing the synchronization
1849   // state is not atomic w.r.t. GC, as this scenario demonstrates:
1850   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1851   //     VM thread changes sync state to synchronizing and suspends threads for GC.
1852   //     Thread A is resumed to finish this native method, but doesn't block here since it
 1853   //     didn't see any synchronization in progress, and escapes.
1854   __ mov(rscratch1, _thread_in_native_trans);
1855 
1856   __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));

3067   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
3068 #endif
3069   // Clear the exception oop so GC no longer processes it as a root.
3070   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
3071 
3072   // r0: exception oop
3073   // r8:  exception handler
3074   // r4: exception pc
3075   // Jump to handler
3076 
3077   __ br(r8);
3078 
3079   // Make sure all code is generated
3080   masm->flush();
3081 
3082   // Set exception blob
3083   _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3084 }
3085 
3086 #endif // COMPILER2

  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "asm/macroAssembler.hpp"
  29 #include "asm/macroAssembler.inline.hpp"
  30 #include "classfile/symbolTable.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "code/compiledIC.hpp"
  33 #include "code/debugInfoRec.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/shared/barrierSetAssembler.hpp"
  38 #include "interpreter/interpreter.hpp"
  39 #include "interpreter/interp_masm.hpp"
  40 #include "logging/log.hpp"
  41 #include "memory/resourceArea.hpp"
  42 #include "nativeInst_aarch64.hpp"
  43 #include "oops/compiledICHolder.hpp"
  44 #include "oops/klass.inline.hpp"
  45 #include "oops/method.inline.hpp"
  46 #include "prims/methodHandles.hpp"
  47 #include "runtime/continuation.hpp"
  48 #include "runtime/continuationEntry.inline.hpp"
  49 #include "runtime/globals.hpp"
  50 #include "runtime/jniHandles.hpp"

 321     case T_SHORT:
 322     case T_INT:
 323       if (int_args < Argument::n_int_register_parameters_j) {
 324         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
 325       } else {
 326         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 327         stk_args += 2;
 328       }
 329       break;
 330     case T_VOID:
 331       // halves of T_LONG or T_DOUBLE
 332       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 333       regs[i].set_bad();
 334       break;
 335     case T_LONG:
 336       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 337       // fall through
 338     case T_OBJECT:
 339     case T_ARRAY:
 340     case T_ADDRESS:
 341     case T_PRIMITIVE_OBJECT:
 342       if (int_args < Argument::n_int_register_parameters_j) {
 343         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
 344       } else {
 345         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 346         stk_args += 2;
 347       }
 348       break;
 349     case T_FLOAT:
 350       if (fp_args < Argument::n_float_register_parameters_j) {
 351         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
 352       } else {
 353         regs[i].set1(VMRegImpl::stack2reg(stk_args));
 354         stk_args += 2;
 355       }
 356       break;
 357     case T_DOUBLE:
 358       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 359       if (fp_args < Argument::n_float_register_parameters_j) {
 360         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
 361       } else {
 362         regs[i].set2(VMRegImpl::stack2reg(stk_args));
 363         stk_args += 2;
 364       }
 365       break;
 366     default:
 367       ShouldNotReachHere();
 368       break;
 369     }
 370   }
 371 
 372   return align_up(stk_args, 2);
 373 }
 374 
 375 
 376 const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j;
 377 const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
 378 
 379 int SharedRuntime::java_return_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) {
 380 
 381   // Create the mapping between argument positions and registers.
 382 
 383   static const Register INT_ArgReg[java_return_convention_max_int] = {
 384     r0 /* j_rarg7 */, j_rarg6, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0
 385   };
 386 
 387   static const FloatRegister FP_ArgReg[java_return_convention_max_float] = {
 388     j_farg0, j_farg1, j_farg2, j_farg3, j_farg4, j_farg5, j_farg6, j_farg7
 389   };
 390 
 391   uint int_args = 0;
 392   uint fp_args = 0;
 393 
 394   for (int i = 0; i < total_args_passed; i++) {
 395     switch (sig_bt[i]) {
 396     case T_BOOLEAN:
 397     case T_CHAR:
 398     case T_BYTE:
 399     case T_SHORT:
 400     case T_INT:
 401       if (int_args < SharedRuntime::java_return_convention_max_int) {
 402         regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
 403         int_args ++;
 404       } else {
 405         return -1;
 406       }
 407       break;
 408     case T_VOID:
 409       // halves of T_LONG or T_DOUBLE
 410       assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
 411       regs[i].set_bad();
 412       break;
 413     case T_LONG:
 414       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 415       // fall through
 416     case T_OBJECT:
 417     case T_ARRAY:
 418     case T_ADDRESS:
 419       // Should T_METADATA be added to java_calling_convention as well ?
 420     case T_METADATA:
 421     case T_PRIMITIVE_OBJECT:
 422       if (int_args < SharedRuntime::java_return_convention_max_int) {
 423         regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
 424         int_args ++;
 425       } else {
 426         return -1;
 427       }
 428       break;
 429     case T_FLOAT:
 430       if (fp_args < SharedRuntime::java_return_convention_max_float) {
 431         regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
 432         fp_args ++;
 433       } else {
 434         return -1;
 435       }
 436       break;
 437     case T_DOUBLE:
 438       assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
 439       if (fp_args < SharedRuntime::java_return_convention_max_float) {
 440         regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
 441         fp_args ++;
 442       } else {
 443         return -1;
 444       }
 445       break;
 446     default:
 447       ShouldNotReachHere();
 448       break;
 449     }
 450   }
 451 
 452   return int_args + fp_args;
 453 }
 454 
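A small worked example of the return convention above (a sketch; it assumes the scalarized fields all fit in the register tables shown):

//   fields returned: (int, long, Object, float)
//   sig_bt:          T_INT, T_LONG, T_VOID, T_OBJECT, T_FLOAT
//   assignment:      INT_ArgReg[0] (set1), INT_ArgReg[1] (set2), set_bad(),
//                    INT_ArgReg[2] (set2), FP_ArgReg[0] (set1)
//   return value:    int_args + fp_args == 3 + 1 == 4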
 455 // Patch the caller's callsite with entry to compiled code if it exists.
 456 static void patch_callers_callsite(MacroAssembler *masm) {
 457   Label L;
 458   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
 459   __ cbz(rscratch1, L);
 460 
 461   __ enter();
 462   __ push_CPU_state();
 463 
 464   // VM needs caller's callsite
 465   // VM needs target method
 466   // This needs to be a long call since we will relocate this adapter to
 467   // the codeBuffer and it may not reach
 468 
 469 #ifndef PRODUCT
 470   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
 471 #endif
 472 
 473   __ mov(c_rarg0, rmethod);
 474   __ mov(c_rarg1, lr);
 475   __ authenticate_return_address(c_rarg1, rscratch1);
 476   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
 477   __ blr(rscratch1);
 478 
 479   // Explicit isb required because fixup_callers_callsite may change the code
 480   // stream.
 481   __ safepoint_isb();
 482 
 483   __ pop_CPU_state();
 484   // restore sp
 485   __ leave();
 486   __ bind(L);
 487 }
 488 
 489 // For each inline type argument, sig includes the list of fields of
 490 // the inline type. This utility function computes the number of
 491 // arguments for the call if inline types are passed by reference (the
 492 // calling convention the interpreter expects).
 493 static int compute_total_args_passed_int(const GrowableArray<SigEntry>* sig_extended) {
 494   int total_args_passed = 0;
 495   if (InlineTypePassFieldsAsArgs) {
 496      for (int i = 0; i < sig_extended->length(); i++) {
 497        BasicType bt = sig_extended->at(i)._bt;
 498        if (bt == T_PRIMITIVE_OBJECT) {
 499          // In sig_extended, an inline type argument starts with:
 500          // T_PRIMITIVE_OBJECT, followed by the types of the fields of the
 501          // inline type and T_VOID to mark the end of the inline
 502          // type. Inline types are flattened so, for instance, in the
 503          // case of an inline type with an int field and an inline type
 504          // field that itself has 2 fields, an int and a long:
 505          // T_PRIMITIVE_OBJECT T_INT T_PRIMITIVE_OBJECT T_INT T_LONG T_VOID (second
 506          // slot for the T_LONG) T_VOID (inner T_PRIMITIVE_OBJECT) T_VOID
 507          // (outer T_PRIMITIVE_OBJECT)
 508          total_args_passed++;
 509          int vt = 1;
 510          do {
 511            i++;
 512            BasicType bt = sig_extended->at(i)._bt;
 513            BasicType prev_bt = sig_extended->at(i-1)._bt;
 514            if (bt == T_PRIMITIVE_OBJECT) {
 515              vt++;
 516            } else if (bt == T_VOID &&
 517                       prev_bt != T_LONG &&
 518                       prev_bt != T_DOUBLE) {
 519              vt--;
 520            }
 521          } while (vt != 0);
 522        } else {
 523          total_args_passed++;
 524        }
 525      }
 526   } else {
 527     total_args_passed = sig_extended->length();
 528   }
 529 
 530   return total_args_passed;
 531 }
 532 
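Tracing the function on the flattened example from the comment above (a sketch, with InlineTypePassFieldsAsArgs enabled):

//   sig_extended: T_PRIMITIVE_OBJECT, T_INT, T_PRIMITIVE_OBJECT, T_INT, T_LONG,
//                 T_VOID, T_VOID, T_VOID
//   The outer T_PRIMITIVE_OBJECT bumps total_args_passed to 1, then the inner
//   do/while walks vt 1 -> 2 (nested T_PRIMITIVE_OBJECT) -> 1 -> 0 as the two
//   closing T_VOIDs are consumed (the T_VOID right after T_LONG is skipped
//   because it is the long's second slot), so the whole inline type counts as
//   a single interpreter argument and the function returns 1.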
 533 
 534 static void gen_c2i_adapter_helper(MacroAssembler* masm,
 535                                    BasicType bt,
 536                                    BasicType prev_bt,
 537                                    size_t size_in_bytes,
 538                                    const VMRegPair& reg_pair,
 539                                    const Address& to,
 540                                    Register tmp1,
 541                                    Register tmp2,
 542                                    Register tmp3,
 543                                    int extraspace,
 544                                    bool is_oop) {
 545   assert(bt != T_PRIMITIVE_OBJECT || !InlineTypePassFieldsAsArgs, "no inline type here");
 546   if (bt == T_VOID) {
 547     assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
 548     return;
 549   }
 550 
 551   // Say 4 args:
 552   // i   st_off
 553   // 0   32 T_LONG
 554   // 1   24 T_VOID
 555   // 2   16 T_OBJECT
 556   // 3    8 T_BOOL
 557   // -    0 return address
 558   //
 559   // However, to make things extra confusing: because we can fit a Java long/double in
 560   // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
 561   // leaves one slot empty and only stores to a single slot. In this case the
 562   // slot that is occupied is the T_VOID slot. See, I said it was confusing.
 563 
 564   bool wide = (size_in_bytes == wordSize);
 565   VMReg r_1 = reg_pair.first();
 566   VMReg r_2 = reg_pair.second();
 567   assert(r_2->is_valid() == wide, "invalid size");
 568   if (!r_1->is_valid()) {
 569     assert(!r_2->is_valid(), "");
 570     return;
 571   }
 572 
 573   if (!r_1->is_FloatRegister()) {
 574     Register val = r25;
 575     if (r_1->is_stack()) {
 576       // memory to memory use r25 (scratch registers are used by store_heap_oop)
 577       int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 578       __ load_sized_value(val, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
 579     } else {
 580       val = r_1->as_Register();
 581     }
 582     assert_different_registers(to.base(), val, tmp1, tmp2, tmp3);
 583     if (is_oop) {
 584       __ store_heap_oop(to, val, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
 585     } else {
 586       __ store_sized_value(to, val, size_in_bytes);
 587     }
 588   } else {
 589     if (wide) {
 590       __ strd(r_1->as_FloatRegister(), to);
 591     } else {
 592       // only a float use just part of the slot
 593       __ strs(r_1->as_FloatRegister(), to);
 594     }
 595   }
 596 }
 597 
 598 static void gen_c2i_adapter(MacroAssembler *masm,
 599                             const GrowableArray<SigEntry>* sig_extended,


 600                             const VMRegPair *regs,
 601                             bool requires_clinit_barrier,
 602                             address& c2i_no_clinit_check_entry,
 603                             Label& skip_fixup,
 604                             address start,
 605                             OopMapSet* oop_maps,
 606                             int& frame_complete,
 607                             int& frame_size_in_words,
 608                             bool alloc_inline_receiver) {
 609   if (requires_clinit_barrier && VM_Version::supports_fast_class_init_checks()) {
 610     Label L_skip_barrier;
 611 
 612     { // Bypass the barrier for non-static methods
 613       __ ldrw(rscratch1, Address(rmethod, Method::access_flags_offset()));
 614       __ andsw(zr, rscratch1, JVM_ACC_STATIC);
 615       __ br(Assembler::EQ, L_skip_barrier); // non-static
 616     }
 617 
 618     __ load_method_holder(rscratch2, rmethod);
 619     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
 620     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 621 
 622     __ bind(L_skip_barrier);
 623     c2i_no_clinit_check_entry = __ pc();
 624   }
 625 
 626   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 627   bs->c2i_entry_barrier(masm);
 628 
 629   // Before we get into the guts of the C2I adapter, see if we should be here
 630   // at all.  We've come from compiled code and are attempting to jump to the
 631   // interpreter, which means the caller made a static call to get here
 632   // (vcalls always get a compiled target if there is one).  Check for a
 633   // compiled target.  If there is one, we need to patch the caller's call.
 634   patch_callers_callsite(masm);
 635 
 636   __ bind(skip_fixup);
 637 
 638   // Name some registers to be used in the following code. We can use
 639   // anything except r0-r7 which are arguments in the Java calling
 640   // convention, rmethod (r12), and r13 which holds the outgoing sender
 641   // SP for the interpreter.
 642   Register buf_array = r10;   // Array of buffered inline types
 643   Register buf_oop = r11;     // Buffered inline type oop
 644   Register tmp1 = r15;
 645   Register tmp2 = r16;
 646   Register tmp3 = r17;
 647 
 648   if (InlineTypePassFieldsAsArgs) {
 649     // Is there an inline type argument?
 650     bool has_inline_argument = false;
 651     for (int i = 0; i < sig_extended->length() && !has_inline_argument; i++) {
 652       has_inline_argument = (sig_extended->at(i)._bt == T_PRIMITIVE_OBJECT);
 653     }
 654     if (has_inline_argument) {
 655       // There is at least an inline type argument: we're coming from
 656       // compiled code so we have no buffers to back the inline types
 657       // Allocate the buffers here with a runtime call.
 658       RegisterSaver reg_save(false /* save_vectors */);
 659       OopMap* map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
 660 
 661       frame_complete = __ offset();
 662       address the_pc = __ pc();
 663 
 664       Label retaddr;
 665       __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
 666 
 667       __ mov(c_rarg0, rthread);
 668       __ mov(c_rarg1, rmethod);
 669       __ mov(c_rarg2, (int64_t)alloc_inline_receiver);
 670 
 671       __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_inline_types)));
 672       __ blr(rscratch1);
 673       __ bind(retaddr);
 674 
 675       oop_maps->add_gc_map(__ pc() - start, map);
 676       __ reset_last_Java_frame(false);
 677 
 678       reg_save.restore_live_registers(masm);
 679 
 680       Label no_exception;
 681       __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
 682       __ cbz(rscratch1, no_exception);
 683 
 684       __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
 685       __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
 686       __ b(RuntimeAddress(StubRoutines::forward_exception_entry()));
 687 
 688       __ bind(no_exception);
 689 
 690       // We get an array of objects from the runtime call
 691       __ get_vm_result(buf_array, rthread);
 692       __ get_vm_result_2(rmethod, rthread); // TODO: required to keep the callee Method live?
 693     }
 694   }
 695 
 696   // Since all args are passed on the stack, total_args_passed *
 697   // Interpreter::stackElementSize is the space we need.
 698 
 699   int total_args_passed = compute_total_args_passed_int(sig_extended);
 700   int extraspace = total_args_passed * Interpreter::stackElementSize;
 701 
 702   // stack is aligned, keep it that way
 703   extraspace = align_up(extraspace, StackAlignmentInBytes);
 704 
 705   // set senderSP value
 706   __ mov(r19_sender_sp, sp);
 707 
 708   __ sub(sp, sp, extraspace);
 709 
 710   // Now write the args into the outgoing interpreter space
 711 
 712   // next_arg_comp is the next argument from the compiler point of
 713   // view (inline type fields are passed in registers/on the stack). In
 714   // sig_extended, an inline type argument starts with: T_PRIMITIVE_OBJECT,
 715   // followed by the types of the fields of the inline type and T_VOID
 716   // to mark the end of the inline type. ignored counts the number of
 717   // T_PRIMITIVE_OBJECT/T_VOID. next_vt_arg is the next inline type argument:
 718   // used to get the buffer for that argument from the pool of buffers
 719   // we allocated above and want to pass to the
 720   // interpreter. next_arg_int is the next argument from the
 721   // interpreter point of view (inline types are passed by reference).
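  // Counter illustration (a sketch, assuming InlineTypePassFieldsAsArgs and a
  // signature of one int plus one inline type with two int fields, i.e.
  // sig_extended = T_INT, T_PRIMITIVE_OBJECT, T_INT, T_INT, T_VOID):
  // the leading T_INT is stored directly and next_arg_int becomes 1; the
  // T_PRIMITIVE_OBJECT marker and the closing T_VOID only advance 'ignored'
  // (final value 2), while the two field T_INTs are written into the buffer
  // oop loaded for next_vt_arg == 0, each taken from regs[next_arg_comp - ignored];
  // finally the buffer reference itself is stored at st_off for the interpreter.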
 722   for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
 723        next_arg_comp < sig_extended->length(); next_arg_comp++) {
 724     assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
 725     assert(next_arg_int <= total_args_passed, "more arguments for the interpreter than expected?");
 726     BasicType bt = sig_extended->at(next_arg_comp)._bt;
 727     int st_off = (total_args_passed - next_arg_int - 1) * Interpreter::stackElementSize;
 728     if (!InlineTypePassFieldsAsArgs || bt != T_PRIMITIVE_OBJECT) {
 729       int next_off = st_off - Interpreter::stackElementSize;
 730       const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
 731       const VMRegPair reg_pair = regs[next_arg_comp-ignored];
 732       size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
 733       gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
 734                              size_in_bytes, reg_pair, Address(sp, offset), tmp1, tmp2, tmp3, extraspace, false);
 735       next_arg_int++;
 736 #ifdef ASSERT
 737       if (bt == T_LONG || bt == T_DOUBLE) {
 738         // Overwrite the unused slot with known junk
 739         __ mov(rscratch1, CONST64(0xdeadffffdeadaaaa));
 740         __ str(rscratch1, Address(sp, st_off));
 741       }
 742 #endif /* ASSERT */
 743     } else {
 744       ignored++;
 745       // get the buffer from the just allocated pool of buffers
 746       int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_PRIMITIVE_OBJECT);
 747       __ load_heap_oop(buf_oop, Address(buf_array, index), tmp1, tmp2);
 748       next_vt_arg++; next_arg_int++;
 749       int vt = 1;
 750       // write fields we get from compiled code in registers/stack
 751       // slots to the buffer: we know we are done with that inline type
 752       // argument when we hit the T_VOID that acts as an end of inline
 753       // type delimiter for this inline type. Inline types are flattened
 754       // so we might encounter embedded inline types. Each entry in
 755       // sig_extended contains a field offset in the buffer.
 756       Label L_null;
 757       do {
 758         next_arg_comp++;
 759         BasicType bt = sig_extended->at(next_arg_comp)._bt;
 760         BasicType prev_bt = sig_extended->at(next_arg_comp - 1)._bt;
 761         if (bt == T_PRIMITIVE_OBJECT) {
 762           vt++;
 763           ignored++;
 764         } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
 765           vt--;
 766           ignored++;
 767         } else {
 768           int off = sig_extended->at(next_arg_comp)._offset;
 769           if (off == -1) {
 770             // Nullable inline type argument, emit null check
 771             VMReg reg = regs[next_arg_comp-ignored].first();
 772             Label L_notNull;
 773             if (reg->is_stack()) {
 774               int ld_off = reg->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
 775               __ ldr(tmp1, Address(sp, ld_off));
 776               __ cbnz(tmp1, L_notNull);
 777             } else {
 778               __ cbnz(reg->as_Register(), L_notNull);
 779             }
 780             __ str(zr, Address(sp, st_off));
 781             __ b(L_null);
 782             __ bind(L_notNull);
 783             continue;
 784           }
 785           assert(off > 0, "offset in object should be positive");
 786           size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
 787           bool is_oop = is_reference_type(bt);
 788           gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
 789                                  size_in_bytes, regs[next_arg_comp-ignored], Address(buf_oop, off), tmp1, tmp2, tmp3, extraspace, is_oop);
 790         }
 791       } while (vt != 0);
 792       // pass the buffer to the interpreter
 793       __ str(buf_oop, Address(sp, st_off));
 794       __ bind(L_null);
 795     }
 796   }
 797 
 798   __ mov(esp, sp); // Interp expects args on caller's expression stack
 799 
 800   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
 801   __ br(rscratch1);
 802 }
 803 
 804 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, int comp_args_on_stack, const GrowableArray<SigEntry>* sig, const VMRegPair *regs) {
 805 
 806 
 807   // Note: r19_sender_sp contains the senderSP on entry. We must
 808   // preserve it since we may do a i2c -> c2i transition if we lose a
 809   // race where compiled code goes non-entrant while we get args
 810   // ready.
 811 
 812   // Adapters are frameless.
 813 
 814   // An i2c adapter is frameless because the *caller* frame, which is
 815   // interpreted, routinely repairs its own esp (from
 816   // interpreter_frame_last_sp), even if a callee has modified the
 817   // stack pointer.  It also recalculates and aligns sp.
 818 
 819   // A c2i adapter is frameless because the *callee* frame, which is
 820   // interpreted, routinely repairs its caller's sp (from sender_sp,
 821   // which is set up via the senderSP register).
 822 
 823   // In other words, if *either* the caller or callee is interpreted, we can
 824   // get the stack pointer repaired after a call.
 825 

 843       range_check(masm, rax, r11,
 844                   Interpreter::code()->code_start(), Interpreter::code()->code_end(),
 845                   L_ok);
 846     if (StubRoutines::code1() != NULL)
 847       range_check(masm, rax, r11,
 848                   StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
 849                   L_ok);
 850     if (StubRoutines::code2() != NULL)
 851       range_check(masm, rax, r11,
 852                   StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
 853                   L_ok);
 854     const char* msg = "i2c adapter must return to an interpreter frame";
 855     __ block_comment(msg);
 856     __ stop(msg);
 857     __ bind(L_ok);
 858     __ block_comment("} verify_i2ce ");
 859 #endif
 860   }
 861 
 862   // Cut-out for having no stack args.
 863   int comp_words_on_stack = 0;
 864   if (comp_args_on_stack) {
 865      comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord;
 866      __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
 867      __ andr(sp, rscratch1, -16);
 868   }
 869 
 870   // Will jump to the compiled code just as if compiled code was doing it.
 871   // Pre-load the register-jump target early, to schedule it better.
 872   __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_inline_offset())));
 873 
 874 #if INCLUDE_JVMCI
 875   if (EnableJVMCI) {
 876     // check if this call should be routed towards a specific entry point
 877     __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 878     Label no_alternative_target;
 879     __ cbz(rscratch2, no_alternative_target);
 880     __ mov(rscratch1, rscratch2);
 881     __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
 882     __ bind(no_alternative_target);
 883   }
 884 #endif // INCLUDE_JVMCI
 885 
 886   int total_args_passed = sig->length();
 887 
 888   // Now generate the shuffle code.
 889   for (int i = 0; i < total_args_passed; i++) {
 890     BasicType bt = sig->at(i)._bt;
 891 
 892     assert(bt != T_PRIMITIVE_OBJECT, "i2c adapter doesn't unpack inline typ args");
 893     if (bt == T_VOID) {
 894       assert(i > 0 && (sig->at(i - 1)._bt == T_LONG || sig->at(i - 1)._bt == T_DOUBLE), "missing half");
 895       continue;
 896     }
 897 
 898     // Pick up 0, 1 or 2 words from SP+offset.
 899     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?");
 900 


 901     // Load in argument order going down.
 902     int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
 903     // Point to interpreter value (vs. tag)
 904     int next_off = ld_off - Interpreter::stackElementSize;
 905     //
 906     //
 907     //
 908     VMReg r_1 = regs[i].first();
 909     VMReg r_2 = regs[i].second();
 910     if (!r_1->is_valid()) {
 911       assert(!r_2->is_valid(), "");
 912       continue;
 913     }
 914     if (r_1->is_stack()) {
 915       // Convert stack slot to an SP offset (+ wordSize to account for return address )
 916       int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size;
 917       if (!r_2->is_valid()) {
 918         // sign extend???
 919         __ ldrsw(rscratch2, Address(esp, ld_off));
 920         __ str(rscratch2, Address(sp, st_off));
 921       } else {
 922         //
 923         // We are using two optoregs. This can be either T_OBJECT,
 924         // T_ADDRESS, T_LONG, or T_DOUBLE. The interpreter allocates
 925         // two slots but only uses one for the T_LONG or T_DOUBLE case,
 926         // so we must adjust where to pick up the data to match the
 927         // interpreter.
 928         //
 929         // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
 930         // are addressed with negative offsets, so the LSW ends up at the lower address.
 931 
 932         // ld_off is MSW so get LSW
 933         const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;

 934         __ ldr(rscratch2, Address(esp, offset));
 935         // st_off is LSW (i.e. reg.first())
 936         __ str(rscratch2, Address(sp, st_off));
 937       }
 938     } else if (r_1->is_Register()) {  // Register argument
 939       Register r = r_1->as_Register();
 940       if (r_2->is_valid()) {
 941         //
 942         // We are using two VMRegs. This can be either T_OBJECT,
 943         // T_ADDRESS, T_LONG, or T_DOUBLE. The interpreter allocates
 944         // two slots but only uses one for the T_LONG or T_DOUBLE case,
 945         // so we must adjust where to pick up the data to match the
 946         // interpreter.
 947 
 948         const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
 949 
 950         // this can be a misaligned move
 951         __ ldr(r, Address(esp, offset));
 952       } else {
 953         // sign extend and use a full word?
 954         __ ldrw(r, Address(esp, ld_off));
 955       }
 956     } else {
 957       if (!r_2->is_valid()) {
 958         __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
 959       } else {
 960         __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
 961       }
 962     }
 963   }
 964 

















 965 
 966   __ mov(rscratch2, rscratch1);
 967   __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
 968   __ mov(rscratch1, rscratch2);
 969 
 970   // 6243940 We might end up in handle_wrong_method if
 971   // the callee is deoptimized as we race through here. If that
 972   // happens we don't want to take a safepoint because the
 973   // caller frame will look interpreted and arguments are now
 974   // "compiled" so it is much better to make this transition
 975   // invisible to the stack walking code. Unfortunately if
 976   // we try to find the callee by normal means a safepoint
 977   // is possible. So we stash the desired callee in the thread
 978   // and the VM will find it there should this case occur.
 979 
 980   __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));

 981   __ br(rscratch1);
 982 }
 983 
 984 static void gen_inline_cache_check(MacroAssembler *masm, Label& skip_fixup) {












 985 
 986   Label ok;
 987 
 988   Register holder = rscratch2;
 989   Register receiver = j_rarg0;
 990   Register tmp = r10;  // A call-clobbered register not used for arg passing
 991 
 992   // -------------------------------------------------------------------------
 993   // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
 994   // to the interpreter.  The args start out packed in the compiled layout.  They
 995   // need to be unpacked into the interpreter layout.  This will almost always
 996   // require some stack space.  We grow the current (compiled) stack, then repack
 997   // the args.  We finally end in a jump to the generic interpreter entry point.
 998   // On exit from the interpreter, the interpreter will restore our SP (lest the
 999   // compiled code, which relies solely on SP and not FP, get sick).
1000 
1001   {
1002     __ block_comment("c2i_unverified_entry {");
1003     __ load_klass(rscratch1, receiver);
1004     __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
1005     __ cmp(rscratch1, tmp);
1006     __ ldr(rmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
1007     __ br(Assembler::EQ, ok);
1008     __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1009 
1010     __ bind(ok);
1011     // Method might have been compiled since the call site was patched to
1012     // interpreted; if that is the case treat it as a miss so we can get
1013     // the call site corrected.
1014     __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
1015     __ cbz(rscratch1, skip_fixup);
1016     __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1017     __ block_comment("} c2i_unverified_entry");
1018   }
1019 }
1020 

1021 
1022 // ---------------------------------------------------------------
1023 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,
1024                                                             int comp_args_on_stack,
1025                                                             const GrowableArray<SigEntry>* sig,
1026                                                             const VMRegPair* regs,
1027                                                             const GrowableArray<SigEntry>* sig_cc,
1028                                                             const VMRegPair* regs_cc,
1029                                                             const GrowableArray<SigEntry>* sig_cc_ro,
1030                                                             const VMRegPair* regs_cc_ro,
1031                                                             AdapterFingerPrint* fingerprint,
1032                                                             AdapterBlob*& new_adapter,
1033                                                             bool allocate_code_blob) {
1034 
1035   address i2c_entry = __ pc();
1036   gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);



1037 
1038   address c2i_unverified_entry        = __ pc();
1039   address c2i_unverified_inline_entry = __ pc();
1040   Label skip_fixup;
1041 
1042   gen_inline_cache_check(masm, skip_fixup);


1043 
1044   OopMapSet* oop_maps = new OopMapSet();
1045   int frame_complete = CodeOffsets::frame_never_safe;
1046   int frame_size_in_words = 0;
1047 
1048   // Scalarized c2i adapter with non-scalarized receiver (i.e., don't pack receiver)
1049   address c2i_no_clinit_check_entry = NULL;
1050   address c2i_inline_ro_entry = __ pc();
1051   if (regs_cc != regs_cc_ro) {
1052     // No class init barrier needed because method is guaranteed to be non-static
1053     gen_c2i_adapter(masm, sig_cc_ro, regs_cc_ro, /* requires_clinit_barrier = */ false, c2i_no_clinit_check_entry,
1054                     skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1055     skip_fixup.reset();
1056   }
1057 
1058   // Scalarized c2i adapter
1059   address c2i_entry        = __ pc();
1060   address c2i_inline_entry = __ pc();
1061   gen_c2i_adapter(masm, sig_cc, regs_cc, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
1062                   skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ true);
1063 
1064   // Non-scalarized c2i adapter
1065   if (regs != regs_cc) {
1066     c2i_unverified_inline_entry = __ pc();
1067     Label inline_entry_skip_fixup;
1068     gen_inline_cache_check(masm, inline_entry_skip_fixup);
1069 
1070     c2i_inline_entry = __ pc();
1071     gen_c2i_adapter(masm, sig, regs, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
1072                     inline_entry_skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1073   }
1074 
1075   __ flush();
1076 
1077   // The c2i adapter might safepoint and trigger a GC. The caller must make sure that
1078   // the GC knows about the location of oop argument locations passed to the c2i adapter.
1079   if (allocate_code_blob) {
1080     bool caller_must_gc_arguments = (regs != regs_cc);
1081     new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps, caller_must_gc_arguments);
1082   }
1083 
1084   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry, c2i_unverified_inline_entry, c2i_no_clinit_check_entry);
1085 }
1086 
1087 static int c_calling_convention_priv(const BasicType *sig_bt,
1088                                          VMRegPair *regs,
1089                                          VMRegPair *regs2,
1090                                          int total_args_passed) {
1091   assert(regs2 == NULL, "not needed on AArch64");
1092 
1093 // We return the amount of VMRegImpl stack slots we need to reserve for all
1094 // the arguments NOT counting out_preserve_stack_slots.
1095 
1096     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1097       c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5,  c_rarg6,  c_rarg7
1098     };
1099     static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1100       c_farg0, c_farg1, c_farg2, c_farg3,
1101       c_farg4, c_farg5, c_farg6, c_farg7
1102     };
1103 
1104     uint int_args = 0;

1112       case T_BYTE:
1113       case T_SHORT:
1114       case T_INT:
1115         if (int_args < Argument::n_int_register_parameters_c) {
1116           regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
1117         } else {
1118 #ifdef __APPLE__
1119           // Less-than word types are stored one after another.
1120           // The code is unable to handle this, so bail out.
1121           return -1;
1122 #endif
1123           regs[i].set1(VMRegImpl::stack2reg(stk_args));
1124           stk_args += 2;
1125         }
1126         break;
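             // Illustrative note (hypothetical layout): stk_args counts 4-byte VMReg
             // slots and grows by 2 per stack argument, so every outgoing stack arg
             // occupies a full 8-byte word (the 9th and 10th ints would land at sp+0
             // and sp+8). The macOS/AArch64 native ABI instead packs sub-word args
             // tightly (sp+0, sp+4), which this slot model cannot express, hence the
             // bailout above under __APPLE__.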
1127       case T_LONG:
1128         assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1129         // fall through
1130       case T_OBJECT:
1131       case T_ARRAY:
1132       case T_PRIMITIVE_OBJECT:
1133       case T_ADDRESS:
1134       case T_METADATA:
1135         if (int_args < Argument::n_int_register_parameters_c) {
1136           regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
1137         } else {
1138           regs[i].set2(VMRegImpl::stack2reg(stk_args));
1139           stk_args += 2;
1140         }
1141         break;
1142       case T_FLOAT:
1143         if (fp_args < Argument::n_float_register_parameters_c) {
1144           regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
1145         } else {
1146 #ifdef __APPLE__
1147           // Less-than word types are stored one after another.
1148           // The code is unable to handle this, so bail out.
1149           return -1;
1150 #endif
1151           regs[i].set1(VMRegImpl::stack2reg(stk_args));
1152           stk_args += 2;

1928   int temploc = -1;
1929   for (int ai = 0; ai < arg_order.length(); ai += 2) {
1930     int i = arg_order.at(ai);
1931     int c_arg = arg_order.at(ai + 1);
1932     __ block_comment(err_msg("move %d -> %d", i, c_arg));
1933     assert(c_arg != -1 && i != -1, "wrong order");
1934 #ifdef ASSERT
1935     if (in_regs[i].first()->is_Register()) {
1936       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1937     } else if (in_regs[i].first()->is_FloatRegister()) {
1938       assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1939     }
1940     if (out_regs[c_arg].first()->is_Register()) {
1941       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1942     } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1943       freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1944     }
1945 #endif /* ASSERT */
1946     switch (in_sig_bt[i]) {
1947       case T_ARRAY:
1948       case T_PRIMITIVE_OBJECT:
1949       case T_OBJECT:
1950         __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1951                        ((i == 0) && (!is_static)),
1952                        &receiver_offset);
1953         int_args++;
1954         break;
1955       case T_VOID:
1956         break;
1957 
1958       case T_FLOAT:
1959         __ float_move(in_regs[i], out_regs[c_arg]);
1960         float_args++;
1961         break;
1962 
1963       case T_DOUBLE:
1964         assert( i + 1 < total_in_args &&
1965                 in_sig_bt[i + 1] == T_VOID &&
1966                 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1967         __ double_move(in_regs[i], out_regs[c_arg]);
1968         float_args++;

2043   Label lock_done;
2044 
2045   if (method->is_synchronized()) {
2046     Label count;
2047     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2048 
2049     // Get the handle (the 2nd argument)
2050     __ mov(oop_handle_reg, c_rarg1);
2051 
2052     // Get address of the box
2053 
2054     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2055 
2056     // Load the oop from the handle
2057     __ ldr(obj_reg, Address(oop_handle_reg, 0));
2058 
2059     if (!UseHeavyMonitors) {
2060       // Load (object->mark() | 1) into swap_reg %r0
2061       __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2062       __ orr(swap_reg, rscratch1, 1);
2063       if (EnableValhalla) {
2064         // Mask inline_type bit such that we go to the slow path if object is an inline type
2065         __ andr(swap_reg, swap_reg, ~((int) markWord::inline_type_bit_in_place));
2066       }
2067 
2068       // Save (object->mark() | 1) into BasicLock's displaced header
2069       __ str(swap_reg, Address(lock_reg, mark_word_offset));
2070 
2071       // src -> dest iff dest == r0 else r0 <- dest
2072       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/NULL);
2073 
2074       // Hmm should this move to the slow path code area???
2075 
2076       // Test if the oopMark is an obvious stack pointer, i.e.,
2077       //  1) (mark & 3) == 0, and
2078       //  2) sp <= mark < mark + os::pagesize()
2079       // These two tests can be done by evaluating the following
2080       // expression: ((mark - sp) & (3 - os::vm_page_size())),
2081       // assuming both stack pointer and pagesize have their
2082       // least significant 2 bits clear.
2083       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
2084 
2085       __ sub(swap_reg, sp, swap_reg);
2086       __ neg(swap_reg, swap_reg);
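           // Worked example (a page size of 4096 is assumed here for illustration):
           // the mask 3 - os::vm_page_size() is -4093 == 0x...f003, and after the
           // sub/neg above swap_reg == mark - sp. The masking step that follows
           // therefore yields zero only when (mark & 3) == 0 (given sp's low bits
           // are clear) and 0 <= mark - sp < 4096, i.e. the displaced mark is a
           // nearby stack address.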

2112 
2113   __ rt_call(native_func);
2114 
2115   __ bind(native_return);
2116 
2117   intptr_t return_pc = (intptr_t) __ pc();
2118   oop_maps->add_gc_map(return_pc - start, map);
2119 
2120   // Unpack native results.
2121   switch (ret_type) {
2122   case T_BOOLEAN: __ c2bool(r0);                     break;
2123   case T_CHAR   : __ ubfx(r0, r0, 0, 16);            break;
2124   case T_BYTE   : __ sbfx(r0, r0, 0, 8);             break;
2125   case T_SHORT  : __ sbfx(r0, r0, 0, 16);            break;
2126   case T_INT    : __ sbfx(r0, r0, 0, 32);            break;
2127   case T_DOUBLE :
2128   case T_FLOAT  :
2129     // Result is in v0; we'll save as needed
2130     break;
2131   case T_ARRAY:                 // Really a handle
2132   case T_PRIMITIVE_OBJECT:           // Really a handle
2133   case T_OBJECT:                // Really a handle
2134       break; // can't de-handlize until after safepoint check
2135   case T_VOID: break;
2136   case T_LONG: break;
2137   default       : ShouldNotReachHere();
2138   }
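       // For illustration (not part of the stub): sbfx(r0, r0, 0, 8) extracts bits
       // [7:0] and sign-extends them, so a native jbyte result of 0xff becomes -1 in
       // r0, while the ubfx used for T_CHAR zero-extends bits [15:0], giving 65535.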
2139 
2140   Label safepoint_in_progress, safepoint_in_progress_done;
2141   Label after_transition;
2142 
2143   // Switch thread to "native transition" state before reading the synchronization state.
2144   // This additional state is necessary because reading and testing the synchronization
2145   // state is not atomic w.r.t. GC, as this scenario demonstrates:
2146   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2147   //     VM thread changes sync state to synchronizing and suspends threads for GC.
2148   //     Thread A is resumed to finish this native method, but doesn't block here since it
2149   //     didn't see any synchronization in progress, and escapes.
2150   __ mov(rscratch1, _thread_in_native_trans);
2151 
2152   __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));

3363   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
3364 #endif
3365   // Clear the exception oop so GC no longer processes it as a root.
3366   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
3367 
3368   // r0: exception oop
3369   // r8:  exception handler
3370   // r4: exception pc
3371   // Jump to handler
3372 
3373   __ br(r8);
3374 
3375   // Make sure all code is generated
3376   masm->flush();
3377 
3378   // Set exception blob
3379   _exception_blob =  ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3380 }
3381 
3382 #endif // COMPILER2
3383 
3384 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
3385   BufferBlob* buf = BufferBlob::create("inline types pack/unpack", 16 * K);
3386   CodeBuffer buffer(buf);
3387   short buffer_locs[20];
3388   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3389                                          sizeof(buffer_locs)/sizeof(relocInfo));
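       // Sizing note (assuming relocInfo is a single 16-bit unit): sizeof(buffer_locs)
       // is 20 * sizeof(short) == 40 bytes, so this reserves room for about 20 shared
       // relocation entries for the small pack/unpack blob.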
3390 
3391   MacroAssembler _masm(&buffer);
3392   MacroAssembler* masm = &_masm;
3393 
3394   const Array<SigEntry>* sig_vk = vk->extended_sig();
3395   const Array<VMRegPair>* regs = vk->return_regs();
3396 
3397   int pack_fields_jobject_off = __ offset();
3398   // Resolve pre-allocated buffer from JNI handle.
3399   // We cannot do this in generate_call_stub() because it requires GC code to be initialized.
3400   Register Rresult = r14;  // See StubGenerator::generate_call_stub().
3401   __ ldr(r0, Address(Rresult));
3402   __ resolve_jobject(r0 /* value */,
3403                      rthread /* thread */,
3404                      r12 /* tmp */);
3405   __ str(r0, Address(Rresult));
3406 
3407   int pack_fields_off = __ offset();
3408 
3409   int j = 1;
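       // Note (an assumption, not stated in this file): regs->at(0) is taken to
       // describe the buffered inline-type oop itself, which stays in r0, so field
       // shuffling starts at j == 1 and the final assert checks every field was visited.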
3410   for (int i = 0; i < sig_vk->length(); i++) {
3411     BasicType bt = sig_vk->at(i)._bt;
3412     if (bt == T_PRIMITIVE_OBJECT) {
3413       continue;
3414     }
3415     if (bt == T_VOID) {
3416       if (sig_vk->at(i-1)._bt == T_LONG ||
3417           sig_vk->at(i-1)._bt == T_DOUBLE) {
3418         j++;
3419       }
3420       continue;
3421     }
3422     int off = sig_vk->at(i)._offset;
3423     VMRegPair pair = regs->at(j);
3424     VMReg r_1 = pair.first();
3425     VMReg r_2 = pair.second();
3426     Address to(r0, off);
3427     if (bt == T_FLOAT) {
3428       __ strs(r_1->as_FloatRegister(), to);
3429     } else if (bt == T_DOUBLE) {
3430       __ strd(r_1->as_FloatRegister(), to);
3431     } else if (bt == T_OBJECT || bt == T_ARRAY) {
3432       Register val = r_1->as_Register();
3433       assert_different_registers(r0, val);
3434       // We don't need barriers because the destination is a newly allocated object.
3435       // Also, we cannot use store_heap_oop(to, val) because it uses r8 as tmp.
3436       if (UseCompressedOops) {
3437         __ encode_heap_oop(val);
3438         __ strw(val, to);  // a narrow oop field is 32 bits wide, so use a 32-bit store
3439       } else {
3440         __ str(val, to);
3441       }
3442     } else {
3443       assert(is_java_primitive(bt), "unexpected basic type");
3444       assert_different_registers(r0, r_1->as_Register());
3445       size_t size_in_bytes = type2aelembytes(bt);
3446       __ store_sized_value(to, r_1->as_Register(), size_in_bytes);
3447     }
3448     j++;
3449   }
3450   assert(j == regs->length(), "missed a field?");
3451 
3452   __ ret(lr);
3453 
3454   int unpack_fields_off = __ offset();
3455 
3456   Label skip;
3457   __ cbz(r0, skip);
3458 
3459   j = 1;
3460   for (int i = 0; i < sig_vk->length(); i++) {
3461     BasicType bt = sig_vk->at(i)._bt;
3462     if (bt == T_PRIMITIVE_OBJECT) {
3463       continue;
3464     }
3465     if (bt == T_VOID) {
3466       if (sig_vk->at(i-1)._bt == T_LONG ||
3467           sig_vk->at(i-1)._bt == T_DOUBLE) {
3468         j++;
3469       }
3470       continue;
3471     }
3472     int off = sig_vk->at(i)._offset;
3473     assert(off > 0, "offset in object should be positive");
3474     VMRegPair pair = regs->at(j);
3475     VMReg r_1 = pair.first();
3476     VMReg r_2 = pair.second();
3477     Address from(r0, off);
3478     if (bt == T_FLOAT) {
3479       __ ldrs(r_1->as_FloatRegister(), from);
3480     } else if (bt == T_DOUBLE) {
3481       __ ldrd(r_1->as_FloatRegister(), from);
3482     } else if (bt == T_OBJECT || bt == T_ARRAY) {
3483       assert_different_registers(r0, r_1->as_Register());
3484       __ load_heap_oop(r_1->as_Register(), from, rscratch1, rscratch2);
3485     } else {
3486       assert(is_java_primitive(bt), "unexpected basic type");
3487       assert_different_registers(r0, r_1->as_Register());
3488 
3489       size_t size_in_bytes = type2aelembytes(bt);
3490       __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
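           // For reference: the final argument is the is_signed flag, so T_CHAR and
           // T_BOOLEAN are zero-extended while T_BYTE, T_SHORT and T_INT are
           // sign-extended into the destination register.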
3491     }
3492     j++;
3493   }
3494   assert(j == regs->length(), "missed a field?");
3495 
3496   __ bind(skip);
3497 
3498   __ ret(lr);
3499 
3500   __ flush();
3501 
3502   return BufferedInlineTypeBlob::create(&buffer, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
3503 }