10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #include "precompiled.hpp"
28 #include "asm/macroAssembler.hpp"
29 #include "asm/macroAssembler.inline.hpp"
30 #include "code/codeCache.hpp"
31 #include "code/compiledIC.hpp"
32 #include "code/debugInfoRec.hpp"
33 #include "code/icBuffer.hpp"
34 #include "code/vtableStubs.hpp"
35 #include "compiler/oopMap.hpp"
36 #include "gc/shared/barrierSetAssembler.hpp"
37 #include "interpreter/interpreter.hpp"
38 #include "interpreter/interp_masm.hpp"
39 #include "logging/log.hpp"
40 #include "memory/resourceArea.hpp"
41 #include "nativeInst_aarch64.hpp"
42 #include "oops/compiledICHolder.hpp"
43 #include "oops/klass.inline.hpp"
44 #include "oops/method.inline.hpp"
45 #include "prims/methodHandles.hpp"
46 #include "runtime/continuation.hpp"
47 #include "runtime/continuationEntry.inline.hpp"
48 #include "runtime/globals.hpp"
49 #include "runtime/jniHandles.hpp"
320 case T_SHORT:
321 case T_INT:
322 if (int_args < Argument::n_int_register_parameters_j) {
323 regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
324 } else {
325 regs[i].set1(VMRegImpl::stack2reg(stk_args));
326 stk_args += 2;
327 }
328 break;
329 case T_VOID:
330 // halves of T_LONG or T_DOUBLE
331 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
332 regs[i].set_bad();
333 break;
334 case T_LONG:
335 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
336 // fall through
337 case T_OBJECT:
338 case T_ARRAY:
339 case T_ADDRESS:
340 if (int_args < Argument::n_int_register_parameters_j) {
341 regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
342 } else {
343 regs[i].set2(VMRegImpl::stack2reg(stk_args));
344 stk_args += 2;
345 }
346 break;
347 case T_FLOAT:
348 if (fp_args < Argument::n_float_register_parameters_j) {
349 regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
350 } else {
351 regs[i].set1(VMRegImpl::stack2reg(stk_args));
352 stk_args += 2;
353 }
354 break;
355 case T_DOUBLE:
356 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
357 if (fp_args < Argument::n_float_register_parameters_j) {
358 regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
359 } else {
360 regs[i].set2(VMRegImpl::stack2reg(stk_args));
361 stk_args += 2;
362 }
363 break;
364 default:
365 ShouldNotReachHere();
366 break;
367 }
368 }
369
370 return align_up(stk_args, 2);
371 }
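// A rough worked example of the mapping above (a sketch, not generated code;
// it assumes the usual AArch64 assignment of INT_ArgReg[] to j_rarg0..j_rarg7
// and FP_ArgReg[] to j_farg0..j_farg7):
//
//   Java signature (int, long, Object, double, float) expands to
//   sig_bt = { T_INT, T_LONG, T_VOID, T_OBJECT, T_DOUBLE, T_VOID, T_FLOAT }
//
//   T_INT    -> set1(INT_ArgReg[0])   int_args = 1
//   T_LONG   -> set2(INT_ArgReg[1])   int_args = 2   (its T_VOID half -> set_bad)
//   T_OBJECT -> set2(INT_ArgReg[2])   int_args = 3
//   T_DOUBLE -> set2(FP_ArgReg[0])    fp_args  = 1   (its T_VOID half -> set_bad)
//   T_FLOAT  -> set1(FP_ArgReg[1])    fp_args  = 2
//
//   No stack slots are needed, so the function returns align_up(0, 2) == 0.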
372
373 // Patch the caller's callsite with entry to compiled code if it exists.
374 static void patch_callers_callsite(MacroAssembler *masm) {
375 Label L;
376 __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
377 __ cbz(rscratch1, L);
378
379 __ enter();
380 __ push_CPU_state();
381
382 // VM needs caller's callsite
383 // VM needs target method
384 // This needs to be a long call since we will relocate this adapter to
385 // the codeBuffer and it may not reach
386
387 #ifndef PRODUCT
388 assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
389 #endif
390
391 __ mov(c_rarg0, rmethod);
392 __ mov(c_rarg1, lr);
393 __ authenticate_return_address(c_rarg1, rscratch1);
394 __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
395 __ blr(rscratch1);
396
397 // Explicit isb required because fixup_callers_callsite may change the code
398 // stream.
399 __ safepoint_isb();
400
401 __ pop_CPU_state();
402 // restore sp
403 __ leave();
404 __ bind(L);
405 }
406
407 static void gen_c2i_adapter(MacroAssembler *masm,
408 int total_args_passed,
409 int comp_args_on_stack,
410 const BasicType *sig_bt,
411 const VMRegPair *regs,
412 Label& skip_fixup) {
413 // Before we get into the guts of the C2I adapter, see if we should be here
414 // at all. We've come from compiled code and are attempting to jump to the
415 // interpreter, which means the caller made a static call to get here
416 // (vcalls always get a compiled target if there is one). Check for a
417 // compiled target. If there is one, we need to patch the caller's call.
418 patch_callers_callsite(masm);
419
420 __ bind(skip_fixup);
421
422 int words_pushed = 0;
423
424 // Since all args are passed on the stack, total_args_passed *
425 // Interpreter::stackElementSize is the space we need.
426
427 int extraspace = total_args_passed * Interpreter::stackElementSize;
428
429 __ mov(r19_sender_sp, sp);
430
431 // stack is aligned, keep it that way
432 extraspace = align_up(extraspace, 2*wordSize);
433
434 if (extraspace)
435 __ sub(sp, sp, extraspace);
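  // E.g. with 3 interpreter arguments and the usual 8-byte stackElementSize,
  // extraspace starts at 24 bytes and is rounded up to 32 so sp stays 16-byte aligned.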
436
437 // Now write the args into the outgoing interpreter space
438 for (int i = 0; i < total_args_passed; i++) {
439 if (sig_bt[i] == T_VOID) {
440 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
441 continue;
442 }
443
444 // offset to start parameters
445 int st_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
446 int next_off = st_off - Interpreter::stackElementSize;
447
448 // Say 4 args:
449 // i st_off
450 // 0 32 T_LONG
451 // 1 24 T_VOID
452 // 2 16 T_OBJECT
453 // 3 8 T_BOOL
454 // - 0 return address
455 //
456     // However, to make things extra confusing: because we can fit a Java long/double in
457     // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
458     // leaves one slot empty and only stores to a single slot. In this case the
459     // slot that is occupied is the T_VOID slot. See, I said it was confusing.
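    // For instance, in the 4-arg layout above the T_LONG at i == 0 has st_off == 32,
    // but the 64-bit value is stored once at next_off == 24 (the slot owned by its
    // trailing T_VOID); in debug builds the unused slot at 32 is filled with junk below.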
460
461 VMReg r_1 = regs[i].first();
462 VMReg r_2 = regs[i].second();
463 if (!r_1->is_valid()) {
464 assert(!r_2->is_valid(), "");
465 continue;
466 }
467 if (r_1->is_stack()) {
468 // memory to memory use rscratch1
469 int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
470 + extraspace
471 + words_pushed * wordSize);
472 if (!r_2->is_valid()) {
473 // sign extend??
474 __ ldrw(rscratch1, Address(sp, ld_off));
475 __ str(rscratch1, Address(sp, st_off));
476
477 } else {
478
479 __ ldr(rscratch1, Address(sp, ld_off));
480
481         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
482         // T_DOUBLE and T_LONG use two slots in the interpreter
483         if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
484 // ld_off == LSW, ld_off+wordSize == MSW
485 // st_off == MSW, next_off == LSW
486 __ str(rscratch1, Address(sp, next_off));
487 #ifdef ASSERT
488 // Overwrite the unused slot with known junk
489 __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaaaull);
490 __ str(rscratch1, Address(sp, st_off));
491 #endif /* ASSERT */
492 } else {
493 __ str(rscratch1, Address(sp, st_off));
494 }
495 }
496 } else if (r_1->is_Register()) {
497 Register r = r_1->as_Register();
498 if (!r_2->is_valid()) {
499         // must be only an int (or smaller), so move only 32 bits to the slot
500 // why not sign extend??
501 __ str(r, Address(sp, st_off));
502 } else {
503         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
504         // T_DOUBLE and T_LONG use two slots in the interpreter
505         if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
506 // jlong/double in gpr
507 #ifdef ASSERT
508 // Overwrite the unused slot with known junk
509 __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaabull);
510 __ str(rscratch1, Address(sp, st_off));
511 #endif /* ASSERT */
512 __ str(r, Address(sp, next_off));
513 } else {
514 __ str(r, Address(sp, st_off));
515 }
516 }
517 } else {
518 assert(r_1->is_FloatRegister(), "");
519 if (!r_2->is_valid()) {
520         // only a float, use just part of the slot
521 __ strs(r_1->as_FloatRegister(), Address(sp, st_off));
522 } else {
523 #ifdef ASSERT
524 // Overwrite the unused slot with known junk
525 __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaacull);
526 __ str(rscratch1, Address(sp, st_off));
527 #endif /* ASSERT */
528 __ strd(r_1->as_FloatRegister(), Address(sp, next_off));
529 }
530 }
531 }
532
533 __ mov(esp, sp); // Interp expects args on caller's expression stack
534
535 __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
536 __ br(rscratch1);
537 }
538
539
540 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
541 int total_args_passed,
542 int comp_args_on_stack,
543 const BasicType *sig_bt,
544 const VMRegPair *regs) {
545
546 // Note: r19_sender_sp contains the senderSP on entry. We must
547   // preserve it since we may do an i2c -> c2i transition if we lose a
548 // race where compiled code goes non-entrant while we get args
549 // ready.
550
551 // Adapters are frameless.
552
553 // An i2c adapter is frameless because the *caller* frame, which is
554 // interpreted, routinely repairs its own esp (from
555 // interpreter_frame_last_sp), even if a callee has modified the
556 // stack pointer. It also recalculates and aligns sp.
557
558 // A c2i adapter is frameless because the *callee* frame, which is
559 // interpreted, routinely repairs its caller's sp (from sender_sp,
560 // which is set up via the senderSP register).
561
562 // In other words, if *either* the caller or callee is interpreted, we can
563 // get the stack pointer repaired after a call.
564
587 range_check(masm, rax, r11,
588 StubRoutines::initial_stubs_code()->code_begin(),
589 StubRoutines::initial_stubs_code()->code_end(),
590 L_ok);
591 }
592 if (StubRoutines::final_stubs_code() != nullptr) {
593 range_check(masm, rax, r11,
594 StubRoutines::final_stubs_code()->code_begin(),
595 StubRoutines::final_stubs_code()->code_end(),
596 L_ok);
597 }
598 const char* msg = "i2c adapter must return to an interpreter frame";
599 __ block_comment(msg);
600 __ stop(msg);
601 __ bind(L_ok);
602 __ block_comment("} verify_i2ce ");
603 #endif
604 }
605
606 // Cut-out for having no stack args.
607 int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
608 if (comp_args_on_stack) {
609 __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
610 __ andr(sp, rscratch1, -16);
611 }
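  // E.g. comp_args_on_stack == 3 VMRegImpl slots (4 bytes each) rounds up to
  // comp_words_on_stack == 2, so sp is dropped by 16 bytes and then re-aligned
  // down to a 16-byte boundary.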
612
613 // Will jump to the compiled code just as if compiled code was doing it.
614 // Pre-load the register-jump target early, to schedule it better.
615 __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));
616
617 #if INCLUDE_JVMCI
618 if (EnableJVMCI) {
619 // check if this call should be routed towards a specific entry point
620 __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
621 Label no_alternative_target;
622 __ cbz(rscratch2, no_alternative_target);
623 __ mov(rscratch1, rscratch2);
624 __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
625 __ bind(no_alternative_target);
626 }
627 #endif // INCLUDE_JVMCI
628
629 // Now generate the shuffle code.
630 for (int i = 0; i < total_args_passed; i++) {
631 if (sig_bt[i] == T_VOID) {
632 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
633 continue;
634 }
635
636 // Pick up 0, 1 or 2 words from SP+offset.
637
638 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
639 "scrambled load targets?");
640 // Load in argument order going down.
641 int ld_off = (total_args_passed - i - 1)*Interpreter::stackElementSize;
642 // Point to interpreter value (vs. tag)
643 int next_off = ld_off - Interpreter::stackElementSize;
644 //
645 //
646 //
647 VMReg r_1 = regs[i].first();
648 VMReg r_2 = regs[i].second();
649 if (!r_1->is_valid()) {
650 assert(!r_2->is_valid(), "");
651 continue;
652 }
653 if (r_1->is_stack()) {
654 // Convert stack slot to an SP offset (+ wordSize to account for return address )
655 int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
656 if (!r_2->is_valid()) {
657 // sign extend???
658 __ ldrsw(rscratch2, Address(esp, ld_off));
659 __ str(rscratch2, Address(sp, st_off));
660 } else {
661 //
662 // We are using two optoregs. This can be either T_OBJECT,
663       // T_ADDRESS, T_LONG, or T_DOUBLE. The interpreter allocates
664       // two slots but only uses one for the T_LONG or T_DOUBLE case,
665       // so we must adjust where to pick up the data to match the
666       // interpreter.
667       //
668       // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
669       // are accessed at negative offsets, so the LSW is at the lower address
670
671 // ld_off is MSW so get LSW
672 const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
673 next_off : ld_off;
674 __ ldr(rscratch2, Address(esp, offset));
675 // st_off is LSW (i.e. reg.first())
676 __ str(rscratch2, Address(sp, st_off));
677 }
678 } else if (r_1->is_Register()) { // Register argument
679 Register r = r_1->as_Register();
680 if (r_2->is_valid()) {
681 //
682 // We are using two VMRegs. This can be either T_OBJECT,
683       // T_ADDRESS, T_LONG, or T_DOUBLE. The interpreter allocates
684       // two slots but only uses one for the T_LONG or T_DOUBLE case,
685       // so we must adjust where to pick up the data to match the
686       // interpreter.
687
688 const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
689 next_off : ld_off;
690
691 // this can be a misaligned move
692 __ ldr(r, Address(esp, offset));
693 } else {
694 // sign extend and use a full word?
695 __ ldrw(r, Address(esp, ld_off));
696 }
697 } else {
698 if (!r_2->is_valid()) {
699 __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
700 } else {
701 __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
702 }
703 }
704 }
705
706 __ mov(rscratch2, rscratch1);
707 __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
708 __ mov(rscratch1, rscratch2);
709
710   // 6243940 We might end up in handle_wrong_method if
711   // the callee is deoptimized as we race through here. If that
712   // happens we don't want to take a safepoint because the
713   // caller frame will look interpreted and arguments are now
714   // "compiled", so it is much better to make this transition
715   // invisible to the stack walking code. Unfortunately, if
716   // we try to find the callee by normal means a safepoint
717   // is possible. So we stash the desired callee in the thread
718   // and the VM will find it there should this case occur.
719
720 __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
721
722 __ br(rscratch1);
723 }
724
725 // ---------------------------------------------------------------
726 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
727 int total_args_passed,
728 int comp_args_on_stack,
729 const BasicType *sig_bt,
730 const VMRegPair *regs,
731 AdapterFingerPrint* fingerprint) {
732 address i2c_entry = __ pc();
733
734 gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
735
736 address c2i_unverified_entry = __ pc();
737 Label skip_fixup;
738
739 Label ok;
740
741 Register holder = rscratch2;
742 Register receiver = j_rarg0;
743 Register tmp = r10; // A call-clobbered register not used for arg passing
744
745 // -------------------------------------------------------------------------
746 // Generate a C2I adapter. On entry we know rmethod holds the Method* during calls
747 // to the interpreter. The args start out packed in the compiled layout. They
748 // need to be unpacked into the interpreter layout. This will almost always
749 // require some stack space. We grow the current (compiled) stack, then repack
750 // the args. We finally end in a jump to the generic interpreter entry point.
751 // On exit from the interpreter, the interpreter will restore our SP (lest the
752 // compiled code, which relies solely on SP and not FP, get sick).
753
754 {
755 __ block_comment("c2i_unverified_entry {");
756 __ load_klass(rscratch1, receiver);
757 __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
758 __ cmp(rscratch1, tmp);
759 __ ldr(rmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
760 __ br(Assembler::EQ, ok);
761 __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
762
763 __ bind(ok);
764 // Method might have been compiled since the call site was patched to
765 // interpreted; if that is the case treat it as a miss so we can get
766 // the call site corrected.
767 __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
768 __ cbz(rscratch1, skip_fixup);
769 __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
770 __ block_comment("} c2i_unverified_entry");
771 }
772
773 address c2i_entry = __ pc();
774
775 // Class initialization barrier for static methods
776 address c2i_no_clinit_check_entry = nullptr;
777 if (VM_Version::supports_fast_class_init_checks()) {
778 Label L_skip_barrier;
779
780 { // Bypass the barrier for non-static methods
781 __ ldrw(rscratch1, Address(rmethod, Method::access_flags_offset()));
782 __ andsw(zr, rscratch1, JVM_ACC_STATIC);
783 __ br(Assembler::EQ, L_skip_barrier); // non-static
784 }
785
786 __ load_method_holder(rscratch2, rmethod);
787 __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
788 __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
789
790 __ bind(L_skip_barrier);
791 c2i_no_clinit_check_entry = __ pc();
792 }
793
794 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
795 bs->c2i_entry_barrier(masm);
796
797 gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
798
799 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
800 }
801
802 static int c_calling_convention_priv(const BasicType *sig_bt,
803 VMRegPair *regs,
804 VMRegPair *regs2,
805 int total_args_passed) {
806 assert(regs2 == nullptr, "not needed on AArch64");
807
808 // We return the amount of VMRegImpl stack slots we need to reserve for all
809 // the arguments NOT counting out_preserve_stack_slots.
810
811 static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
812 c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5, c_rarg6, c_rarg7
813 };
814 static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
815 c_farg0, c_farg1, c_farg2, c_farg3,
816 c_farg4, c_farg5, c_farg6, c_farg7
817 };
818
819 uint int_args = 0;
827 case T_BYTE:
828 case T_SHORT:
829 case T_INT:
830 if (int_args < Argument::n_int_register_parameters_c) {
831 regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
832 } else {
833 #ifdef __APPLE__
834         // Less-than-word types are stored one after another.
835         // The code is unable to handle this, so bail out.
836 return -1;
837 #endif
838 regs[i].set1(VMRegImpl::stack2reg(stk_args));
839 stk_args += 2;
840 }
841 break;
842 case T_LONG:
843 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
844 // fall through
845 case T_OBJECT:
846 case T_ARRAY:
847 case T_ADDRESS:
848 case T_METADATA:
849 if (int_args < Argument::n_int_register_parameters_c) {
850 regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
851 } else {
852 regs[i].set2(VMRegImpl::stack2reg(stk_args));
853 stk_args += 2;
854 }
855 break;
856 case T_FLOAT:
857 if (fp_args < Argument::n_float_register_parameters_c) {
858 regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
859 } else {
860 #ifdef __APPLE__
861         // Less-than-word types are stored one after another.
862         // The code is unable to handle this, so bail out.
863 return -1;
864 #endif
865 regs[i].set1(VMRegImpl::stack2reg(stk_args));
866 stk_args += 2;
1651 int temploc = -1;
1652 for (int ai = 0; ai < arg_order.length(); ai += 2) {
1653 int i = arg_order.at(ai);
1654 int c_arg = arg_order.at(ai + 1);
1655 __ block_comment(err_msg("move %d -> %d", i, c_arg));
1656 assert(c_arg != -1 && i != -1, "wrong order");
1657 #ifdef ASSERT
1658 if (in_regs[i].first()->is_Register()) {
1659 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1660 } else if (in_regs[i].first()->is_FloatRegister()) {
1661 assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1662 }
1663 if (out_regs[c_arg].first()->is_Register()) {
1664 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1665 } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1666 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1667 }
1668 #endif /* ASSERT */
1669 switch (in_sig_bt[i]) {
1670 case T_ARRAY:
1671 case T_OBJECT:
1672 __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1673 ((i == 0) && (!is_static)),
1674 &receiver_offset);
1675 int_args++;
1676 break;
1677 case T_VOID:
1678 break;
1679
1680 case T_FLOAT:
1681 __ float_move(in_regs[i], out_regs[c_arg]);
1682 float_args++;
1683 break;
1684
1685 case T_DOUBLE:
1686 assert( i + 1 < total_in_args &&
1687 in_sig_bt[i + 1] == T_VOID &&
1688 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1689 __ double_move(in_regs[i], out_regs[c_arg]);
1690 float_args++;
1767 if (method->is_synchronized()) {
1768 Label count;
1769 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1770
1771 // Get the handle (the 2nd argument)
1772 __ mov(oop_handle_reg, c_rarg1);
1773
1774 // Get address of the box
1775
1776 __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1777
1778 // Load the oop from the handle
1779 __ ldr(obj_reg, Address(oop_handle_reg, 0));
1780
1781 if (LockingMode == LM_MONITOR) {
1782 __ b(slow_path_lock);
1783 } else if (LockingMode == LM_LEGACY) {
1784 // Load (object->mark() | 1) into swap_reg %r0
1785 __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1786 __ orr(swap_reg, rscratch1, 1);
1787
1788 // Save (object->mark() | 1) into BasicLock's displaced header
1789 __ str(swap_reg, Address(lock_reg, mark_word_offset));
1790
1791 // src -> dest iff dest == r0 else r0 <- dest
1792 __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
1793
1794 // Hmm should this move to the slow path code area???
1795
1796 // Test if the oopMark is an obvious stack pointer, i.e.,
1797 // 1) (mark & 3) == 0, and
1798 // 2) sp <= mark < mark + os::pagesize()
1799 // These 3 tests can be done by evaluating the following
1800 // expression: ((mark - sp) & (3 - os::vm_page_size())),
1801 // assuming both stack pointer and pagesize have their
1802 // least significant 2 bits clear.
1803 // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
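      // Rough numeric sketch of that expression (assuming a 4K page): if the mark is
      // sp + 0x40 with its low two bits clear, then (mark - sp) & (3 - 0x1000)
      // == 0x40 & 0xfffffffffffff003 == 0, i.e. the mark points into our own stack
      // and this is a recursive lock; any mark outside [sp, sp + page_size) or with
      // low bits set gives a non-zero result and falls into the slow path.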
1804
1805 __ sub(swap_reg, sp, swap_reg);
1806 __ neg(swap_reg, swap_reg);
1834
1835 __ rt_call(native_func);
1836
1837 __ bind(native_return);
1838
1839 intptr_t return_pc = (intptr_t) __ pc();
1840 oop_maps->add_gc_map(return_pc - start, map);
1841
1842 // Unpack native results.
1843 switch (ret_type) {
1844 case T_BOOLEAN: __ c2bool(r0); break;
1845 case T_CHAR : __ ubfx(r0, r0, 0, 16); break;
1846 case T_BYTE : __ sbfx(r0, r0, 0, 8); break;
1847 case T_SHORT : __ sbfx(r0, r0, 0, 16); break;
1848 case T_INT : __ sbfx(r0, r0, 0, 32); break;
1849 case T_DOUBLE :
1850 case T_FLOAT :
1851      // Result is in v0; we'll save it as needed
1852 break;
1853 case T_ARRAY: // Really a handle
1854 case T_OBJECT: // Really a handle
1855 break; // can't de-handlize until after safepoint check
1856 case T_VOID: break;
1857 case T_LONG: break;
1858 default : ShouldNotReachHere();
1859 }
1860
1861 Label safepoint_in_progress, safepoint_in_progress_done;
1862 Label after_transition;
1863
1864 // Switch thread to "native transition" state before reading the synchronization state.
1865 // This additional state is necessary because reading and testing the synchronization
1866 // state is not atomic w.r.t. GC, as this scenario demonstrates:
1867 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1868 // VM thread changes sync state to synchronizing and suspends threads for GC.
1869 // Thread A is resumed to finish this native method, but doesn't block here since it
1870  //   didn't see any synchronization in progress, and escapes.
1871 __ mov(rscratch1, _thread_in_native_trans);
1872
1873 __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
3090 __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
3091 #endif
3092 // Clear the exception oop so GC no longer processes it as a root.
3093 __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
3094
3095 // r0: exception oop
3096 // r8: exception handler
3097 // r4: exception pc
3098 // Jump to handler
3099
3100 __ br(r8);
3101
3102 // Make sure all code is generated
3103 masm->flush();
3104
3105 // Set exception blob
3106 _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3107 }
3108
3109 #endif // COMPILER2
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #include "precompiled.hpp"
28 #include "asm/macroAssembler.hpp"
29 #include "asm/macroAssembler.inline.hpp"
30 #include "classfile/symbolTable.hpp"
31 #include "code/codeCache.hpp"
32 #include "code/compiledIC.hpp"
33 #include "code/debugInfoRec.hpp"
34 #include "code/icBuffer.hpp"
35 #include "code/vtableStubs.hpp"
36 #include "compiler/oopMap.hpp"
37 #include "gc/shared/barrierSetAssembler.hpp"
38 #include "interpreter/interpreter.hpp"
39 #include "interpreter/interp_masm.hpp"
40 #include "logging/log.hpp"
41 #include "memory/resourceArea.hpp"
42 #include "nativeInst_aarch64.hpp"
43 #include "oops/compiledICHolder.hpp"
44 #include "oops/klass.inline.hpp"
45 #include "oops/method.inline.hpp"
46 #include "prims/methodHandles.hpp"
47 #include "runtime/continuation.hpp"
48 #include "runtime/continuationEntry.inline.hpp"
49 #include "runtime/globals.hpp"
50 #include "runtime/jniHandles.hpp"
321 case T_SHORT:
322 case T_INT:
323 if (int_args < Argument::n_int_register_parameters_j) {
324 regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
325 } else {
326 regs[i].set1(VMRegImpl::stack2reg(stk_args));
327 stk_args += 2;
328 }
329 break;
330 case T_VOID:
331 // halves of T_LONG or T_DOUBLE
332 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
333 regs[i].set_bad();
334 break;
335 case T_LONG:
336 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
337 // fall through
338 case T_OBJECT:
339 case T_ARRAY:
340 case T_ADDRESS:
341 case T_PRIMITIVE_OBJECT:
342 if (int_args < Argument::n_int_register_parameters_j) {
343 regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
344 } else {
345 regs[i].set2(VMRegImpl::stack2reg(stk_args));
346 stk_args += 2;
347 }
348 break;
349 case T_FLOAT:
350 if (fp_args < Argument::n_float_register_parameters_j) {
351 regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
352 } else {
353 regs[i].set1(VMRegImpl::stack2reg(stk_args));
354 stk_args += 2;
355 }
356 break;
357 case T_DOUBLE:
358 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
359 if (fp_args < Argument::n_float_register_parameters_j) {
360 regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
361 } else {
362 regs[i].set2(VMRegImpl::stack2reg(stk_args));
363 stk_args += 2;
364 }
365 break;
366 default:
367 ShouldNotReachHere();
368 break;
369 }
370 }
371
372 return align_up(stk_args, 2);
373 }
374
375
376 const uint SharedRuntime::java_return_convention_max_int = Argument::n_int_register_parameters_j;
377 const uint SharedRuntime::java_return_convention_max_float = Argument::n_float_register_parameters_j;
378
379 int SharedRuntime::java_return_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) {
380
381 // Create the mapping between argument positions and registers.
382
383 static const Register INT_ArgReg[java_return_convention_max_int] = {
384 r0 /* j_rarg7 */, j_rarg6, j_rarg5, j_rarg4, j_rarg3, j_rarg2, j_rarg1, j_rarg0
385 };
386
387 static const FloatRegister FP_ArgReg[java_return_convention_max_float] = {
388 j_farg0, j_farg1, j_farg2, j_farg3, j_farg4, j_farg5, j_farg6, j_farg7
389 };
390
391 uint int_args = 0;
392 uint fp_args = 0;
393
394 for (int i = 0; i < total_args_passed; i++) {
395 switch (sig_bt[i]) {
396 case T_BOOLEAN:
397 case T_CHAR:
398 case T_BYTE:
399 case T_SHORT:
400 case T_INT:
401 if (int_args < SharedRuntime::java_return_convention_max_int) {
402 regs[i].set1(INT_ArgReg[int_args]->as_VMReg());
403 int_args ++;
404 } else {
405 return -1;
406 }
407 break;
408 case T_VOID:
409 // halves of T_LONG or T_DOUBLE
410 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
411 regs[i].set_bad();
412 break;
413 case T_LONG:
414 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
415 // fall through
416 case T_OBJECT:
417 case T_ARRAY:
418 case T_ADDRESS:
419 // Should T_METADATA be added to java_calling_convention as well ?
420 case T_METADATA:
421 case T_PRIMITIVE_OBJECT:
422 if (int_args < SharedRuntime::java_return_convention_max_int) {
423 regs[i].set2(INT_ArgReg[int_args]->as_VMReg());
424 int_args ++;
425 } else {
426 return -1;
427 }
428 break;
429 case T_FLOAT:
430 if (fp_args < SharedRuntime::java_return_convention_max_float) {
431 regs[i].set1(FP_ArgReg[fp_args]->as_VMReg());
432 fp_args ++;
433 } else {
434 return -1;
435 }
436 break;
437 case T_DOUBLE:
438 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
439 if (fp_args < SharedRuntime::java_return_convention_max_float) {
440 regs[i].set2(FP_ArgReg[fp_args]->as_VMReg());
441 fp_args ++;
442 } else {
443 return -1;
444 }
445 break;
446 default:
447 ShouldNotReachHere();
448 break;
449 }
450 }
451
452 return int_args + fp_args;
453 }
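// For example, returning a scalarized inline type with an int and a double field
// gives sig_bt = { T_INT, T_DOUBLE, T_VOID }: the T_INT maps to INT_ArgReg[0] (r0),
// the T_DOUBLE to FP_ArgReg[0] (j_farg0), and the function returns 2
// (one int register plus one float register used).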
454
455 // Patch the caller's callsite with entry to compiled code if it exists.
456 static void patch_callers_callsite(MacroAssembler *masm) {
457 Label L;
458 __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
459 __ cbz(rscratch1, L);
460
461 __ enter();
462 __ push_CPU_state();
463
464 // VM needs caller's callsite
465 // VM needs target method
466 // This needs to be a long call since we will relocate this adapter to
467 // the codeBuffer and it may not reach
468
469 #ifndef PRODUCT
470 assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
471 #endif
472
473 __ mov(c_rarg0, rmethod);
474 __ mov(c_rarg1, lr);
475 __ authenticate_return_address(c_rarg1, rscratch1);
476 __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
477 __ blr(rscratch1);
478
479 // Explicit isb required because fixup_callers_callsite may change the code
480 // stream.
481 __ safepoint_isb();
482
483 __ pop_CPU_state();
484 // restore sp
485 __ leave();
486 __ bind(L);
487 }
488
489 // For each inline type argument, sig includes the list of fields of
490 // the inline type. This utility function computes the number of
491 // arguments for the call if inline types are passed by reference (the
492 // calling convention the interpreter expects).
493 static int compute_total_args_passed_int(const GrowableArray<SigEntry>* sig_extended) {
494 int total_args_passed = 0;
495 if (InlineTypePassFieldsAsArgs) {
496 for (int i = 0; i < sig_extended->length(); i++) {
497 BasicType bt = sig_extended->at(i)._bt;
498 if (bt == T_PRIMITIVE_OBJECT) {
499 // In sig_extended, an inline type argument starts with:
500 // T_PRIMITIVE_OBJECT, followed by the types of the fields of the
501 // inline type and T_VOID to mark the end of the value
502 // type. Inline types are flattened so, for instance, in the
503 // case of an inline type with an int field and an inline type
504 // field that itself has 2 fields, an int and a long:
505 // T_PRIMITIVE_OBJECT T_INT T_PRIMITIVE_OBJECT T_INT T_LONG T_VOID (second
506 // slot for the T_LONG) T_VOID (inner T_PRIMITIVE_OBJECT) T_VOID
507 // (outer T_PRIMITIVE_OBJECT)
508 total_args_passed++;
509 int vt = 1;
510 do {
511 i++;
512 BasicType bt = sig_extended->at(i)._bt;
513 BasicType prev_bt = sig_extended->at(i-1)._bt;
514 if (bt == T_PRIMITIVE_OBJECT) {
515 vt++;
516 } else if (bt == T_VOID &&
517 prev_bt != T_LONG &&
518 prev_bt != T_DOUBLE) {
519 vt--;
520 }
521 } while (vt != 0);
522 } else {
523 total_args_passed++;
524 }
525 }
526 } else {
527 total_args_passed = sig_extended->length();
528 }
529
530 return total_args_passed;
531 }
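// Tracing the example from the comment above
// (T_PRIMITIVE_OBJECT T_INT T_PRIMITIVE_OBJECT T_INT T_LONG T_VOID T_VOID T_VOID):
// the outer T_PRIMITIVE_OBJECT counts as one interpreter argument, vt rises to 2
// at the nested T_PRIMITIVE_OBJECT, stays 2 through the int/long and the long's
// padding T_VOID, then drops to 1 and 0 on the two closing T_VOIDs, so
// total_args_passed == 1.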
532
533
534 static void gen_c2i_adapter_helper(MacroAssembler* masm,
535 BasicType bt,
536 BasicType prev_bt,
537 size_t size_in_bytes,
538 const VMRegPair& reg_pair,
539 const Address& to,
540 Register tmp1,
541 Register tmp2,
542 Register tmp3,
543 int extraspace,
544 bool is_oop) {
545 assert(bt != T_PRIMITIVE_OBJECT || !InlineTypePassFieldsAsArgs, "no inline type here");
546 if (bt == T_VOID) {
547 assert(prev_bt == T_LONG || prev_bt == T_DOUBLE, "missing half");
548 return;
549 }
550
551 // Say 4 args:
552 // i st_off
553 // 0 32 T_LONG
554 // 1 24 T_VOID
555 // 2 16 T_OBJECT
556 // 3 8 T_BOOL
557 // - 0 return address
558 //
559   // However, to make things extra confusing: because we can fit a Java long/double in
560   // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
561   // leaves one slot empty and only stores to a single slot. In this case the
562   // slot that is occupied is the T_VOID slot. See, I said it was confusing.
563
564 bool wide = (size_in_bytes == wordSize);
565 VMReg r_1 = reg_pair.first();
566 VMReg r_2 = reg_pair.second();
567 assert(r_2->is_valid() == wide, "invalid size");
568 if (!r_1->is_valid()) {
569 assert(!r_2->is_valid(), "");
570 return;
571 }
572
573 if (!r_1->is_FloatRegister()) {
574 Register val = r25;
575 if (r_1->is_stack()) {
576       // memory to memory: use r25 (the scratch registers are used by store_heap_oop)
577 int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
578 __ load_sized_value(val, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
579 } else {
580 val = r_1->as_Register();
581 }
582 assert_different_registers(to.base(), val, tmp1, tmp2, tmp3);
583 if (is_oop) {
584 __ store_heap_oop(to, val, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
585 } else {
586 __ store_sized_value(to, val, size_in_bytes);
587 }
588 } else {
589 if (wide) {
590 __ strd(r_1->as_FloatRegister(), to);
591 } else {
592       // only a float, use just part of the slot
593 __ strs(r_1->as_FloatRegister(), to);
594 }
595 }
596 }
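// Rough examples of how the helper above behaves: an int field arriving in a
// general register with size_in_bytes == 4 goes through store_sized_value (a
// 4-byte store); an oop field goes through store_heap_oop so the GC barriers are
// applied; a double in an FP register (wide == true) is stored with strd, while a
// float uses strs and occupies only part of the slot.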
597
598 static void gen_c2i_adapter(MacroAssembler *masm,
599 const GrowableArray<SigEntry>* sig_extended,
600 const VMRegPair *regs,
601 bool requires_clinit_barrier,
602 address& c2i_no_clinit_check_entry,
603 Label& skip_fixup,
604 address start,
605 OopMapSet* oop_maps,
606 int& frame_complete,
607 int& frame_size_in_words,
608 bool alloc_inline_receiver) {
609 if (requires_clinit_barrier && VM_Version::supports_fast_class_init_checks()) {
610 Label L_skip_barrier;
611
612 { // Bypass the barrier for non-static methods
613 __ ldrw(rscratch1, Address(rmethod, Method::access_flags_offset()));
614 __ andsw(zr, rscratch1, JVM_ACC_STATIC);
615 __ br(Assembler::EQ, L_skip_barrier); // non-static
616 }
617
618 __ load_method_holder(rscratch2, rmethod);
619 __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
620 __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
621
622 __ bind(L_skip_barrier);
623 c2i_no_clinit_check_entry = __ pc();
624 }
625
626 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
627 bs->c2i_entry_barrier(masm);
628
629 // Before we get into the guts of the C2I adapter, see if we should be here
630 // at all. We've come from compiled code and are attempting to jump to the
631 // interpreter, which means the caller made a static call to get here
632 // (vcalls always get a compiled target if there is one). Check for a
633 // compiled target. If there is one, we need to patch the caller's call.
634 patch_callers_callsite(masm);
635
636 __ bind(skip_fixup);
637
638 // Name some registers to be used in the following code. We can use
639 // anything except r0-r7 which are arguments in the Java calling
640 // convention, rmethod (r12), and r13 which holds the outgoing sender
641 // SP for the interpreter.
642 Register buf_array = r10; // Array of buffered inline types
643 Register buf_oop = r11; // Buffered inline type oop
644 Register tmp1 = r15;
645 Register tmp2 = r16;
646 Register tmp3 = r17;
647
648 if (InlineTypePassFieldsAsArgs) {
649 // Is there an inline type argument?
650 bool has_inline_argument = false;
651 for (int i = 0; i < sig_extended->length() && !has_inline_argument; i++) {
652 has_inline_argument = (sig_extended->at(i)._bt == T_PRIMITIVE_OBJECT);
653 }
654 if (has_inline_argument) {
655       // There is at least one inline type argument: we're coming from
656       // compiled code, so we have no buffers to back the inline types.
657       // Allocate the buffers here with a runtime call.
658 RegisterSaver reg_save(false /* save_vectors */);
659 OopMap* map = reg_save.save_live_registers(masm, 0, &frame_size_in_words);
660
661 frame_complete = __ offset();
662 address the_pc = __ pc();
663
664 Label retaddr;
665 __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
666
667 __ mov(c_rarg0, rthread);
668 __ mov(c_rarg1, rmethod);
669 __ mov(c_rarg2, (int64_t)alloc_inline_receiver);
670
671 __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::allocate_inline_types)));
672 __ blr(rscratch1);
673 __ bind(retaddr);
674
675 oop_maps->add_gc_map(__ pc() - start, map);
676 __ reset_last_Java_frame(false);
677
678 reg_save.restore_live_registers(masm);
679
680 Label no_exception;
681 __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
682 __ cbz(rscratch1, no_exception);
683
684 __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
685 __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
686 __ b(RuntimeAddress(StubRoutines::forward_exception_entry()));
687
688 __ bind(no_exception);
689
690 // We get an array of objects from the runtime call
691 __ get_vm_result(buf_array, rthread);
692 __ get_vm_result_2(rmethod, rthread); // TODO: required to keep the callee Method live?
693 }
694 }
695
696 // Since all args are passed on the stack, total_args_passed *
697 // Interpreter::stackElementSize is the space we need.
698
699 int total_args_passed = compute_total_args_passed_int(sig_extended);
700 int extraspace = total_args_passed * Interpreter::stackElementSize;
701
702 // stack is aligned, keep it that way
703 extraspace = align_up(extraspace, StackAlignmentInBytes);
704
705 // set senderSP value
706 __ mov(r19_sender_sp, sp);
707
708 __ sub(sp, sp, extraspace);
709
710 // Now write the args into the outgoing interpreter space
711
712 // next_arg_comp is the next argument from the compiler point of
713 // view (inline type fields are passed in registers/on the stack). In
714 // sig_extended, an inline type argument starts with: T_PRIMITIVE_OBJECT,
715 // followed by the types of the fields of the inline type and T_VOID
716 // to mark the end of the inline type. ignored counts the number of
717 // T_PRIMITIVE_OBJECT/T_VOID. next_vt_arg is the next inline type argument:
718 // used to get the buffer for that argument from the pool of buffers
719 // we allocated above and want to pass to the
720 // interpreter. next_arg_int is the next argument from the
721 // interpreter point of view (inline types are passed by reference).
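  // For instance, a single inline type argument with one int field
  // (sig_extended = { T_PRIMITIVE_OBJECT, T_INT, T_VOID }) gives
  // total_args_passed == 1: the T_PRIMITIVE_OBJECT entry grabs a buffer
  // (next_vt_arg and next_arg_int become 1, ignored becomes 1), the T_INT is
  // written into that buffer from regs[0], and the closing T_VOID only bumps
  // ignored to 2.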
722 for (int next_arg_comp = 0, ignored = 0, next_vt_arg = 0, next_arg_int = 0;
723 next_arg_comp < sig_extended->length(); next_arg_comp++) {
724 assert(ignored <= next_arg_comp, "shouldn't skip over more slots than there are arguments");
725 assert(next_arg_int <= total_args_passed, "more arguments for the interpreter than expected?");
726 BasicType bt = sig_extended->at(next_arg_comp)._bt;
727 int st_off = (total_args_passed - next_arg_int - 1) * Interpreter::stackElementSize;
728 if (!InlineTypePassFieldsAsArgs || bt != T_PRIMITIVE_OBJECT) {
729 int next_off = st_off - Interpreter::stackElementSize;
730 const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : st_off;
731 const VMRegPair reg_pair = regs[next_arg_comp-ignored];
732 size_t size_in_bytes = reg_pair.second()->is_valid() ? 8 : 4;
733 gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
734 size_in_bytes, reg_pair, Address(sp, offset), tmp1, tmp2, tmp3, extraspace, false);
735 next_arg_int++;
736 #ifdef ASSERT
737 if (bt == T_LONG || bt == T_DOUBLE) {
738 // Overwrite the unused slot with known junk
739 __ mov(rscratch1, CONST64(0xdeadffffdeadaaaa));
740 __ str(rscratch1, Address(sp, st_off));
741 }
742 #endif /* ASSERT */
743 } else {
744 ignored++;
745 // get the buffer from the just allocated pool of buffers
746 int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + next_vt_arg * type2aelembytes(T_PRIMITIVE_OBJECT);
747 __ load_heap_oop(buf_oop, Address(buf_array, index), tmp1, tmp2);
748 next_vt_arg++; next_arg_int++;
749 int vt = 1;
750 // write fields we get from compiled code in registers/stack
751 // slots to the buffer: we know we are done with that inline type
752 // argument when we hit the T_VOID that acts as an end of inline
753 // type delimiter for this inline type. Inline types are flattened
754 // so we might encounter embedded inline types. Each entry in
755 // sig_extended contains a field offset in the buffer.
756 Label L_null;
757 do {
758 next_arg_comp++;
759 BasicType bt = sig_extended->at(next_arg_comp)._bt;
760 BasicType prev_bt = sig_extended->at(next_arg_comp - 1)._bt;
761 if (bt == T_PRIMITIVE_OBJECT) {
762 vt++;
763 ignored++;
764 } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
765 vt--;
766 ignored++;
767 } else {
768 int off = sig_extended->at(next_arg_comp)._offset;
769 if (off == -1) {
770 // Nullable inline type argument, emit null check
771 VMReg reg = regs[next_arg_comp-ignored].first();
772 Label L_notNull;
773 if (reg->is_stack()) {
774 int ld_off = reg->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
775 __ ldrb(tmp1, Address(sp, ld_off));
776 __ cbnz(tmp1, L_notNull);
777 } else {
778 __ cbnz(reg->as_Register(), L_notNull);
779 }
780 __ str(zr, Address(sp, st_off));
781 __ b(L_null);
782 __ bind(L_notNull);
783 continue;
784 }
785 assert(off > 0, "offset in object should be positive");
786 size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
787 bool is_oop = is_reference_type(bt);
788 gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended->at(next_arg_comp-1)._bt : T_ILLEGAL,
789 size_in_bytes, regs[next_arg_comp-ignored], Address(buf_oop, off), tmp1, tmp2, tmp3, extraspace, is_oop);
790 }
791 } while (vt != 0);
792 // pass the buffer to the interpreter
793 __ str(buf_oop, Address(sp, st_off));
794 __ bind(L_null);
795 }
796 }
797
798 __ mov(esp, sp); // Interp expects args on caller's expression stack
799
800 __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
801 __ br(rscratch1);
802 }
803
804 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, int comp_args_on_stack, const GrowableArray<SigEntry>* sig, const VMRegPair *regs) {
805
806
807 // Note: r19_sender_sp contains the senderSP on entry. We must
808   // preserve it since we may do an i2c -> c2i transition if we lose a
809 // race where compiled code goes non-entrant while we get args
810 // ready.
811
812 // Adapters are frameless.
813
814 // An i2c adapter is frameless because the *caller* frame, which is
815 // interpreted, routinely repairs its own esp (from
816 // interpreter_frame_last_sp), even if a callee has modified the
817 // stack pointer. It also recalculates and aligns sp.
818
819 // A c2i adapter is frameless because the *callee* frame, which is
820 // interpreted, routinely repairs its caller's sp (from sender_sp,
821 // which is set up via the senderSP register).
822
823 // In other words, if *either* the caller or callee is interpreted, we can
824 // get the stack pointer repaired after a call.
825
848 range_check(masm, rax, r11,
849 StubRoutines::initial_stubs_code()->code_begin(),
850 StubRoutines::initial_stubs_code()->code_end(),
851 L_ok);
852 }
853 if (StubRoutines::final_stubs_code() != nullptr) {
854 range_check(masm, rax, r11,
855 StubRoutines::final_stubs_code()->code_begin(),
856 StubRoutines::final_stubs_code()->code_end(),
857 L_ok);
858 }
859 const char* msg = "i2c adapter must return to an interpreter frame";
860 __ block_comment(msg);
861 __ stop(msg);
862 __ bind(L_ok);
863 __ block_comment("} verify_i2ce ");
864 #endif
865 }
866
867 // Cut-out for having no stack args.
868 int comp_words_on_stack = 0;
869 if (comp_args_on_stack) {
870 comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord;
871 __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
872 __ andr(sp, rscratch1, -16);
873 }
874
875 // Will jump to the compiled code just as if compiled code was doing it.
876 // Pre-load the register-jump target early, to schedule it better.
877 __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_inline_offset())));
878
879 #if INCLUDE_JVMCI
880 if (EnableJVMCI) {
881 // check if this call should be routed towards a specific entry point
882 __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
883 Label no_alternative_target;
884 __ cbz(rscratch2, no_alternative_target);
885 __ mov(rscratch1, rscratch2);
886 __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
887 __ bind(no_alternative_target);
888 }
889 #endif // INCLUDE_JVMCI
890
891 int total_args_passed = sig->length();
892
893 // Now generate the shuffle code.
894 for (int i = 0; i < total_args_passed; i++) {
895 BasicType bt = sig->at(i)._bt;
896
897     assert(bt != T_PRIMITIVE_OBJECT, "i2c adapter doesn't unpack inline type args");
898 if (bt == T_VOID) {
899 assert(i > 0 && (sig->at(i - 1)._bt == T_LONG || sig->at(i - 1)._bt == T_DOUBLE), "missing half");
900 continue;
901 }
902
903 // Pick up 0, 1 or 2 words from SP+offset.
904 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?");
905
906 // Load in argument order going down.
907 int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize;
908 // Point to interpreter value (vs. tag)
909 int next_off = ld_off - Interpreter::stackElementSize;
910 //
911 //
912 //
913 VMReg r_1 = regs[i].first();
914 VMReg r_2 = regs[i].second();
915 if (!r_1->is_valid()) {
916 assert(!r_2->is_valid(), "");
917 continue;
918 }
919 if (r_1->is_stack()) {
920 // Convert stack slot to an SP offset (+ wordSize to account for return address )
921 int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size;
922 if (!r_2->is_valid()) {
923 // sign extend???
924 __ ldrsw(rscratch2, Address(esp, ld_off));
925 __ str(rscratch2, Address(sp, st_off));
926 } else {
927 //
928 // We are using two optoregs. This can be either T_OBJECT,
929       // T_ADDRESS, T_LONG, or T_DOUBLE. The interpreter allocates
930       // two slots but only uses one for the T_LONG or T_DOUBLE case,
931       // so we must adjust where to pick up the data to match the
932       // interpreter.
933       //
934       // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
935       // are accessed at negative offsets, so the LSW is at the lower address
936
937 // ld_off is MSW so get LSW
938 const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
939 __ ldr(rscratch2, Address(esp, offset));
940 // st_off is LSW (i.e. reg.first())
941 __ str(rscratch2, Address(sp, st_off));
942 }
943 } else if (r_1->is_Register()) { // Register argument
944 Register r = r_1->as_Register();
945 if (r_2->is_valid()) {
946 //
947 // We are using two VMRegs. This can be either T_OBJECT,
948       // T_ADDRESS, T_LONG, or T_DOUBLE. The interpreter allocates
949       // two slots but only uses one for the T_LONG or T_DOUBLE case,
950       // so we must adjust where to pick up the data to match the
951       // interpreter.
952
953 const int offset = (bt == T_LONG || bt == T_DOUBLE) ? next_off : ld_off;
954
955 // this can be a misaligned move
956 __ ldr(r, Address(esp, offset));
957 } else {
958 // sign extend and use a full word?
959 __ ldrw(r, Address(esp, ld_off));
960 }
961 } else {
962 if (!r_2->is_valid()) {
963 __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
964 } else {
965 __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
966 }
967 }
968 }
969
970
971 __ mov(rscratch2, rscratch1);
972 __ push_cont_fastpath(rthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about; kills rscratch1
973 __ mov(rscratch1, rscratch2);
974
975   // 6243940 We might end up in handle_wrong_method if
976   // the callee is deoptimized as we race through here. If that
977   // happens we don't want to take a safepoint because the
978   // caller frame will look interpreted and arguments are now
979   // "compiled", so it is much better to make this transition
980   // invisible to the stack walking code. Unfortunately, if
981   // we try to find the callee by normal means a safepoint
982   // is possible. So we stash the desired callee in the thread
983   // and the VM will find it there should this case occur.
984
985 __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));
986 __ br(rscratch1);
987 }
988
989 static void gen_inline_cache_check(MacroAssembler *masm, Label& skip_fixup) {
990
991 Label ok;
992
993 Register holder = rscratch2;
994 Register receiver = j_rarg0;
995 Register tmp = r10; // A call-clobbered register not used for arg passing
996
997 // -------------------------------------------------------------------------
998 // Generate a C2I adapter. On entry we know rmethod holds the Method* during calls
999 // to the interpreter. The args start out packed in the compiled layout. They
1000 // need to be unpacked into the interpreter layout. This will almost always
1001 // require some stack space. We grow the current (compiled) stack, then repack
1002 // the args. We finally end in a jump to the generic interpreter entry point.
1003 // On exit from the interpreter, the interpreter will restore our SP (lest the
1004 // compiled code, which relies solely on SP and not FP, get sick).
1005
1006 {
1007 __ block_comment("c2i_unverified_entry {");
1008 __ load_klass(rscratch1, receiver);
1009 __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
1010 __ cmp(rscratch1, tmp);
1011 __ ldr(rmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
1012 __ br(Assembler::EQ, ok);
1013 __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1014
1015 __ bind(ok);
1016 // Method might have been compiled since the call site was patched to
1017 // interpreted; if that is the case treat it as a miss so we can get
1018 // the call site corrected.
1019 __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
1020 __ cbz(rscratch1, skip_fixup);
1021 __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1022 __ block_comment("} c2i_unverified_entry");
1023 }
1024 }
1025
1026
1027 // ---------------------------------------------------------------
1028 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,
1029 int comp_args_on_stack,
1030 const GrowableArray<SigEntry>* sig,
1031 const VMRegPair* regs,
1032 const GrowableArray<SigEntry>* sig_cc,
1033 const VMRegPair* regs_cc,
1034 const GrowableArray<SigEntry>* sig_cc_ro,
1035 const VMRegPair* regs_cc_ro,
1036 AdapterFingerPrint* fingerprint,
1037 AdapterBlob*& new_adapter,
1038 bool allocate_code_blob) {
1039
1040 address i2c_entry = __ pc();
1041 gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);
1042
1043 address c2i_unverified_entry = __ pc();
1044 address c2i_unverified_inline_entry = __ pc();
1045 Label skip_fixup;
1046
1047 gen_inline_cache_check(masm, skip_fixup);
1048
1049 OopMapSet* oop_maps = new OopMapSet();
1050 int frame_complete = CodeOffsets::frame_never_safe;
1051 int frame_size_in_words = 0;
1052
1053 // Scalarized c2i adapter with non-scalarized receiver (i.e., don't pack receiver)
1054 address c2i_no_clinit_check_entry = nullptr;
1055 address c2i_inline_ro_entry = __ pc();
1056 if (regs_cc != regs_cc_ro) {
1057 // No class init barrier needed because method is guaranteed to be non-static
1058 gen_c2i_adapter(masm, sig_cc_ro, regs_cc_ro, /* requires_clinit_barrier = */ false, c2i_no_clinit_check_entry,
1059 skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1060 skip_fixup.reset();
1061 }
1062
1063 // Scalarized c2i adapter
1064 address c2i_entry = __ pc();
1065 address c2i_inline_entry = __ pc();
1066 gen_c2i_adapter(masm, sig_cc, regs_cc, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
1067 skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ true);
1068
1069 // Non-scalarized c2i adapter
1070 if (regs != regs_cc) {
1071 c2i_unverified_inline_entry = __ pc();
1072 Label inline_entry_skip_fixup;
1073 gen_inline_cache_check(masm, inline_entry_skip_fixup);
1074
1075 c2i_inline_entry = __ pc();
1076 gen_c2i_adapter(masm, sig, regs, /* requires_clinit_barrier = */ true, c2i_no_clinit_check_entry,
1077 inline_entry_skip_fixup, i2c_entry, oop_maps, frame_complete, frame_size_in_words, /* alloc_inline_receiver = */ false);
1078 }
1079
1080
1081   // The c2i adapter might safepoint and trigger a GC. The caller must make sure that
1082   // the GC knows about the locations of oop arguments passed to the c2i adapter.
1083 if (allocate_code_blob) {
1084 bool caller_must_gc_arguments = (regs != regs_cc);
1085 new_adapter = AdapterBlob::create(masm->code(), frame_complete, frame_size_in_words, oop_maps, caller_must_gc_arguments);
1086 }
1087
1088 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry, c2i_unverified_inline_entry, c2i_no_clinit_check_entry);
1089 }
1090
1091 static int c_calling_convention_priv(const BasicType *sig_bt,
1092 VMRegPair *regs,
1093 VMRegPair *regs2,
1094 int total_args_passed) {
1095 assert(regs2 == nullptr, "not needed on AArch64");
1096
1097 // We return the amount of VMRegImpl stack slots we need to reserve for all
1098 // the arguments NOT counting out_preserve_stack_slots.
1099
1100 static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1101 c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5, c_rarg6, c_rarg7
1102 };
1103 static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1104 c_farg0, c_farg1, c_farg2, c_farg3,
1105 c_farg4, c_farg5, c_farg6, c_farg7
1106 };
1107
1108 uint int_args = 0;
1116 case T_BYTE:
1117 case T_SHORT:
1118 case T_INT:
1119 if (int_args < Argument::n_int_register_parameters_c) {
1120 regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
1121 } else {
1122 #ifdef __APPLE__
1123        // Less-than-word types are stored one after another.
1124        // The code is unable to handle this, so bail out.
1125 return -1;
1126 #endif
1127 regs[i].set1(VMRegImpl::stack2reg(stk_args));
1128 stk_args += 2;
1129 }
1130 break;
1131 case T_LONG:
1132 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1133 // fall through
1134 case T_OBJECT:
1135 case T_ARRAY:
1136 case T_PRIMITIVE_OBJECT:
1137 case T_ADDRESS:
1138 case T_METADATA:
1139 if (int_args < Argument::n_int_register_parameters_c) {
1140 regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
1141 } else {
1142 regs[i].set2(VMRegImpl::stack2reg(stk_args));
1143 stk_args += 2;
1144 }
1145 break;
1146 case T_FLOAT:
1147 if (fp_args < Argument::n_float_register_parameters_c) {
1148 regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
1149 } else {
1150 #ifdef __APPLE__
1151         // On macOS/AArch64, sub-word arguments are packed tightly on the stack rather
1152         // than widened to full slots. The code below cannot handle that, so bail out.
1153 return -1;
1154 #endif
1155 regs[i].set1(VMRegImpl::stack2reg(stk_args));
1156 stk_args += 2;
1941 int temploc = -1;
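  // arg_order holds (java_index, c_index) pairs describing one move each. The order is
  // chosen so that no move reads a location that an earlier move has already overwritten;
  // the ASSERT bookkeeping below verifies exactly that.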
1942 for (int ai = 0; ai < arg_order.length(); ai += 2) {
1943 int i = arg_order.at(ai);
1944 int c_arg = arg_order.at(ai + 1);
1945 __ block_comment(err_msg("move %d -> %d", i, c_arg));
1946 assert(c_arg != -1 && i != -1, "wrong order");
1947 #ifdef ASSERT
1948 if (in_regs[i].first()->is_Register()) {
1949 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1950 } else if (in_regs[i].first()->is_FloatRegister()) {
1951 assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1952 }
1953 if (out_regs[c_arg].first()->is_Register()) {
1954 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1955 } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1956 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1957 }
1958 #endif /* ASSERT */
1959 switch (in_sig_bt[i]) {
1960 case T_ARRAY:
1961 case T_PRIMITIVE_OBJECT:
1962 case T_OBJECT:
1963 __ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1964 ((i == 0) && (!is_static)),
1965 &receiver_offset);
1966 int_args++;
1967 break;
1968 case T_VOID:
1969 break;
1970
1971 case T_FLOAT:
1972 __ float_move(in_regs[i], out_regs[c_arg]);
1973 float_args++;
1974 break;
1975
1976 case T_DOUBLE:
1977 assert( i + 1 < total_in_args &&
1978 in_sig_bt[i + 1] == T_VOID &&
1979 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1980 __ double_move(in_regs[i], out_regs[c_arg]);
1981 float_args++;
2058 if (method->is_synchronized()) {
2059 Label count;
2060 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2061
2062 // Get the handle (the 2nd argument)
2063 __ mov(oop_handle_reg, c_rarg1);
2064
2065 // Get address of the box
2066
2067 __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2068
2069 // Load the oop from the handle
2070 __ ldr(obj_reg, Address(oop_handle_reg, 0));
2071
2072 if (LockingMode == LM_MONITOR) {
2073 __ b(slow_path_lock);
2074 } else if (LockingMode == LM_LEGACY) {
2075 // Load (object->mark() | 1) into swap_reg %r0
2076 __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2077 __ orr(swap_reg, rscratch1, 1);
2078 if (EnableValhalla) {
2079 // Mask inline_type bit such that we go to the slow path if object is an inline type
2080 __ andr(swap_reg, swap_reg, ~((int) markWord::inline_type_bit_in_place));
2081 }
2082
2083 // Save (object->mark() | 1) into BasicLock's displaced header
2084 __ str(swap_reg, Address(lock_reg, mark_word_offset));
2085
2086 // src -> dest iff dest == r0 else r0 <- dest
2087 __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
2088
2089     // Should this move to the slow-path code area?
2090
2091 // Test if the oopMark is an obvious stack pointer, i.e.,
2092 // 1) (mark & 3) == 0, and
2093     // 2) sp <= mark < sp + os::vm_page_size()
2094     // These three tests (alignment, lower bound, upper bound) can be done by evaluating the following
2095 // expression: ((mark - sp) & (3 - os::vm_page_size())),
2096     // assuming both the stack pointer and the page size have their
2097 // least significant 2 bits clear.
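    // For example, with 4 KiB pages, 3 - os::vm_page_size() == -0xffd == ~0xffc, a mask
    // that keeps the low two bits and every bit at or above the page size:
    //   mark == sp + 0x40   ->  0x40   & ~0xffc == 0   (aligned, within the page: stack lock)
    //   mark == sp + 0x1040 ->  0x1040 & ~0xffc != 0   (beyond the page)
    //   mark == sp + 0x42   ->  0x42   & ~0xffc != 0   (low bits set, not a stack address)
    // A mark below sp gives a negative difference whose high bits survive the mask,
    // so it fails the test as well.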
2098 // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
2099
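    // swap_reg := mark - sp (the sub computes sp - mark, the neg flips the sign).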
2100 __ sub(swap_reg, sp, swap_reg);
2101 __ neg(swap_reg, swap_reg);
2129
2130 __ rt_call(native_func);
2131
2132 __ bind(native_return);
2133
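  // Register an oop map at the native call's return PC so the frame can be walked
  // (and its oops located) if a safepoint or GC happens while the thread is in native.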
2134 intptr_t return_pc = (intptr_t) __ pc();
2135 oop_maps->add_gc_map(return_pc - start, map);
2136
2137 // Unpack native results.
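  // Sub-word integer results are normalized to proper Java values: boolean to 0/1,
  // char zero-extended, byte/short/int sign-extended.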
2138 switch (ret_type) {
2139 case T_BOOLEAN: __ c2bool(r0); break;
2140 case T_CHAR : __ ubfx(r0, r0, 0, 16); break;
2141 case T_BYTE : __ sbfx(r0, r0, 0, 8); break;
2142 case T_SHORT : __ sbfx(r0, r0, 0, 16); break;
2143 case T_INT : __ sbfx(r0, r0, 0, 32); break;
2144 case T_DOUBLE :
2145 case T_FLOAT :
2146     // Result is in v0; we'll save it as needed
2147 break;
2148 case T_ARRAY: // Really a handle
2149 case T_PRIMITIVE_OBJECT: // Really a handle
2150 case T_OBJECT: // Really a handle
2151 break; // can't de-handlize until after safepoint check
2152 case T_VOID: break;
2153 case T_LONG: break;
2154 default : ShouldNotReachHere();
2155 }
2156
2157 Label safepoint_in_progress, safepoint_in_progress_done;
2158 Label after_transition;
2159
2160 // Switch thread to "native transition" state before reading the synchronization state.
2161 // This additional state is necessary because reading and testing the synchronization
2162 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2163 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2164 // VM thread changes sync state to synchronizing and suspends threads for GC.
2165 // Thread A is resumed to finish this native method, but doesn't block here since it
2166   // didn't see any synchronization in progress, and escapes.
2167 __ mov(rscratch1, _thread_in_native_trans);
2168
2169 __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
3386 __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
3387 #endif
3388 // Clear the exception oop so GC no longer processes it as a root.
3389 __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
3390
3391 // r0: exception oop
3392 // r8: exception handler
3393 // r4: exception pc
3394 // Jump to handler
3395
3396 __ br(r8);
3397
3398 // Make sure all code is generated
3399 masm->flush();
3400
3401 // Set exception blob
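  // SimpleRuntimeFrame::framesize is counted in 32-bit VMReg slots; shifting right by one
  // converts it to the 64-bit word count that ExceptionBlob::create() expects.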
3402 _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
3403 }
3404
3405 #endif // COMPILER2
3406
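// Generates a small blob with pack/unpack handlers for the inline klass 'vk':
//  - pack_fields:   stores the field values held in vk->return_regs() into the buffered
//                   instance whose oop is in r0 (pack_fields_jobject first resolves that
//                   oop from a JNI handle);
//  - unpack_fields: the reverse, loads the fields of the buffered instance in r0 back
//                   into the registers, doing nothing if r0 is null.
// These handlers convert between the scalarized (fields in registers) and the buffered
// (heap-allocated) representation of an inline type.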
3407 BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
3408 BufferBlob* buf = BufferBlob::create("inline types pack/unpack", 16 * K);
3409 CodeBuffer buffer(buf);
3410 short buffer_locs[20];
3411 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3412 sizeof(buffer_locs)/sizeof(relocInfo));
3413
3414 MacroAssembler _masm(&buffer);
3415 MacroAssembler* masm = &_masm;
3416
3417 const Array<SigEntry>* sig_vk = vk->extended_sig();
3418 const Array<VMRegPair>* regs = vk->return_regs();
3419
3420 int pack_fields_jobject_off = __ offset();
3421 // Resolve pre-allocated buffer from JNI handle.
3422 // We cannot do this in generate_call_stub() because it requires GC code to be initialized.
3423 Register Rresult = r14; // See StubGenerator::generate_call_stub().
3424 __ ldr(r0, Address(Rresult));
3425 __ resolve_jobject(r0 /* value */,
3426 rthread /* thread */,
3427 r12 /* tmp */);
3428 __ str(r0, Address(Rresult));
3429
3430 int pack_fields_off = __ offset();
3431
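  // Pack: walk the extended signature and store each field from its register into the
  // buffer at the field's offset. Field registers start at j = 1 (index 0 corresponds to
  // the inline type value itself); T_VOID entries mark the second halves of longs/doubles
  // and only advance the register index.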
3432 int j = 1;
3433 for (int i = 0; i < sig_vk->length(); i++) {
3434 BasicType bt = sig_vk->at(i)._bt;
3435 if (bt == T_PRIMITIVE_OBJECT) {
3436 continue;
3437 }
3438 if (bt == T_VOID) {
3439 if (sig_vk->at(i-1)._bt == T_LONG ||
3440 sig_vk->at(i-1)._bt == T_DOUBLE) {
3441 j++;
3442 }
3443 continue;
3444 }
3445 int off = sig_vk->at(i)._offset;
3446 VMRegPair pair = regs->at(j);
3447 VMReg r_1 = pair.first();
3448 VMReg r_2 = pair.second();
3449 Address to(r0, off);
3450 if (bt == T_FLOAT) {
3451 __ strs(r_1->as_FloatRegister(), to);
3452 } else if (bt == T_DOUBLE) {
3453 __ strd(r_1->as_FloatRegister(), to);
3454 } else {
3455 Register val = r_1->as_Register();
3456 assert_different_registers(to.base(), val, r15, r16, r17);
3457 if (is_reference_type(bt)) {
3458 __ store_heap_oop(to, val, r15, r16, r17, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED);
3459 } else {
3460 __ store_sized_value(to, r_1->as_Register(), type2aelembytes(bt));
3461 }
3462 }
3463 j++;
3464 }
3465 assert(j == regs->length(), "missed a field?");
3466
3467 __ ret(lr);
3468
3469 int unpack_fields_off = __ offset();
3470
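  // Unpack: the mirror image of the packing loop above. If r0 is null there is nothing
  // to load; otherwise each field is loaded from the buffered instance back into its register.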
3471 Label skip;
3472 __ cbz(r0, skip);
3473
3474 j = 1;
3475 for (int i = 0; i < sig_vk->length(); i++) {
3476 BasicType bt = sig_vk->at(i)._bt;
3477 if (bt == T_PRIMITIVE_OBJECT) {
3478 continue;
3479 }
3480 if (bt == T_VOID) {
3481 if (sig_vk->at(i-1)._bt == T_LONG ||
3482 sig_vk->at(i-1)._bt == T_DOUBLE) {
3483 j++;
3484 }
3485 continue;
3486 }
3487 int off = sig_vk->at(i)._offset;
3488 assert(off > 0, "offset in object should be positive");
3489 VMRegPair pair = regs->at(j);
3490 VMReg r_1 = pair.first();
3491 VMReg r_2 = pair.second();
3492 Address from(r0, off);
3493 if (bt == T_FLOAT) {
3494 __ ldrs(r_1->as_FloatRegister(), from);
3495 } else if (bt == T_DOUBLE) {
3496 __ ldrd(r_1->as_FloatRegister(), from);
3497 } else if (bt == T_OBJECT || bt == T_ARRAY) {
3498 assert_different_registers(r0, r_1->as_Register());
3499 __ load_heap_oop(r_1->as_Register(), from, rscratch1, rscratch2);
3500 } else {
3501 assert(is_java_primitive(bt), "unexpected basic type");
3502 assert_different_registers(r0, r_1->as_Register());
3503
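      // Primitive fields are sign-extended on load, except char and boolean, which are
      // zero-extended, matching their Java semantics.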
3504 size_t size_in_bytes = type2aelembytes(bt);
3505 __ load_sized_value(r_1->as_Register(), from, size_in_bytes, bt != T_CHAR && bt != T_BOOLEAN);
3506 }
3507 j++;
3508 }
3509 assert(j == regs->length(), "missed a field?");
3510
3511 __ bind(skip);
3512
3513 __ ret(lr);
3514
3515 __ flush();
3516
3517 return BufferedInlineTypeBlob::create(&buffer, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
3518 }