1 /*
2 * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
447 Label skip;
448
449 __ ldr(Rtemp, Address(Rmethod, Method::code_offset()));
450 __ cbz(Rtemp, skip);
451
452 // Pushing an even number of registers for stack alignment.
453 // Selecting R9, which had to be saved anyway for some platforms.
454 __ push(RegisterSet(R0, R3) | R9 | LR);
455 __ fpush_hardfp(FloatRegisterSet(D0, 8));
456
457 __ mov(R0, Rmethod);
458 __ mov(R1, LR);
459 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
460
461 __ fpop_hardfp(FloatRegisterSet(D0, 8));
462 __ pop(RegisterSet(R0, R3) | R9 | LR);
463
464 __ bind(skip);
465 }
466
467 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
468 int total_args_passed, int comp_args_on_stack,
469 const BasicType *sig_bt, const VMRegPair *regs) {
470 // TODO: ARM - May be can use ldm to load arguments
471 const Register tmp = Rtemp; // avoid erasing R5_mh
472
473 // Next assert may not be needed but safer. Extra analysis required
474 // if this there is not enough free registers and we need to use R5 here.
475 assert_different_registers(tmp, R5_mh);
476
477 // 6243940 We might end up in handle_wrong_method if
478 // the callee is deoptimized as we race thru here. If that
479 // happens we don't want to take a safepoint because the
480 // caller frame will look interpreted and arguments are now
481 // "compiled" so it is much better to make this transition
482 // invisible to the stack walking code. Unfortunately if
483 // we try and find the callee by normal means a safepoint
484 // is possible. So we stash the desired callee in the thread
485 // and the vm will find there should this case occur.
486 Address callee_target_addr(Rthread, JavaThread::callee_target_offset());
487 __ str(Rmethod, callee_target_addr);
488
489
490 assert_different_registers(tmp, R0, R1, R2, R3, Rsender_sp, Rmethod);
491
492 const Register initial_sp = Rmethod; // temporarily scratched
493
494 // Old code was modifying R4 but this looks unsafe (particularly with JSR292)
495 assert_different_registers(tmp, R0, R1, R2, R3, Rsender_sp, initial_sp);
496
497 __ mov(initial_sp, SP);
498
499 if (comp_args_on_stack) {
500 __ sub_slow(SP, SP, comp_args_on_stack * VMRegImpl::stack_slot_size);
501 }
502 __ bic(SP, SP, StackAlignmentInBytes - 1);
503
504 for (int i = 0; i < total_args_passed; i++) {
505 if (sig_bt[i] == T_VOID) {
506 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
507 continue;
508 }
509 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "must be ordered");
510 int arg_offset = Interpreter::expr_offset_in_bytes(total_args_passed - 1 - i);
511
512 VMReg r_1 = regs[i].first();
513 VMReg r_2 = regs[i].second();
514 if (r_1->is_stack()) {
515 int stack_offset = r_1->reg2stack() * VMRegImpl::stack_slot_size;
516 if (!r_2->is_valid()) {
517 __ ldr(tmp, Address(initial_sp, arg_offset));
518 __ str(tmp, Address(SP, stack_offset));
519 } else {
520 __ ldr(tmp, Address(initial_sp, arg_offset - Interpreter::stackElementSize));
521 __ str(tmp, Address(SP, stack_offset));
522 __ ldr(tmp, Address(initial_sp, arg_offset));
523 __ str(tmp, Address(SP, stack_offset + wordSize));
524 }
525 } else if (r_1->is_Register()) {
526 if (!r_2->is_valid()) {
532 } else if (r_1->is_FloatRegister()) {
533 #ifdef __SOFTFP__
534 ShouldNotReachHere();
535 #endif // __SOFTFP__
536 if (!r_2->is_valid()) {
537 __ flds(r_1->as_FloatRegister(), Address(initial_sp, arg_offset));
538 } else {
539 __ fldd(r_1->as_FloatRegister(), Address(initial_sp, arg_offset - Interpreter::stackElementSize));
540 }
541 } else {
542 assert(!r_1->is_valid() && !r_2->is_valid(), "must be");
543 }
544 }
545
546 // restore Rmethod (scratched for initial_sp)
547 __ ldr(Rmethod, callee_target_addr);
548 __ ldr(PC, Address(Rmethod, Method::from_compiled_offset()));
549
550 }
551
552 static void gen_c2i_adapter(MacroAssembler *masm,
553 int total_args_passed, int comp_args_on_stack,
554 const BasicType *sig_bt, const VMRegPair *regs,
555 Label& skip_fixup) {
556 // TODO: ARM - May be can use stm to deoptimize arguments
557 const Register tmp = Rtemp;
558
559 patch_callers_callsite(masm);
560 __ bind(skip_fixup);
561
562 __ mov(Rsender_sp, SP); // not yet saved
563
564
565 int extraspace = total_args_passed * Interpreter::stackElementSize;
566 if (extraspace) {
567 __ sub_slow(SP, SP, extraspace);
568 }
569
570 for (int i = 0; i < total_args_passed; i++) {
571 if (sig_bt[i] == T_VOID) {
572 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
573 continue;
574 }
575 int stack_offset = (total_args_passed - 1 - i) * Interpreter::stackElementSize;
576
577 VMReg r_1 = regs[i].first();
578 VMReg r_2 = regs[i].second();
579 if (r_1->is_stack()) {
580 int arg_offset = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
581 if (!r_2->is_valid()) {
582 __ ldr(tmp, Address(SP, arg_offset));
583 __ str(tmp, Address(SP, stack_offset));
584 } else {
585 __ ldr(tmp, Address(SP, arg_offset));
586 __ str(tmp, Address(SP, stack_offset - Interpreter::stackElementSize));
587 __ ldr(tmp, Address(SP, arg_offset + wordSize));
588 __ str(tmp, Address(SP, stack_offset));
589 }
590 } else if (r_1->is_Register()) {
591 if (!r_2->is_valid()) {
592 __ str(r_1->as_Register(), Address(SP, stack_offset));
595 __ str(r_2->as_Register(), Address(SP, stack_offset));
596 }
597 } else if (r_1->is_FloatRegister()) {
598 #ifdef __SOFTFP__
599 ShouldNotReachHere();
600 #endif // __SOFTFP__
601 if (!r_2->is_valid()) {
602 __ fsts(r_1->as_FloatRegister(), Address(SP, stack_offset));
603 } else {
604 __ fstd(r_1->as_FloatRegister(), Address(SP, stack_offset - Interpreter::stackElementSize));
605 }
606 } else {
607 assert(!r_1->is_valid() && !r_2->is_valid(), "must be");
608 }
609 }
610
611 __ ldr(PC, Address(Rmethod, Method::interpreter_entry_offset()));
612
613 }
614
615 void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
616 int total_args_passed,
617 int comp_args_on_stack,
618 const BasicType *sig_bt,
619 const VMRegPair *regs,
620 address entry_address[AdapterBlob::ENTRY_COUNT]) {
621 entry_address[AdapterBlob::I2C] = __ pc();
622 gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
623
624 entry_address[AdapterBlob::C2I_Unverified] = __ pc();
625 Label skip_fixup;
626 const Register receiver = R0;
627 const Register holder_klass = Rtemp; // XXX should be OK for C2 but not 100% sure
628
629 __ ic_check(1 /* end_alignment */);
630 __ ldr(Rmethod, Address(Ricklass, CompiledICData::speculated_method_offset()));
631
632 __ ldr(Rtemp, Address(Rmethod, Method::code_offset()), eq);
633 __ cmp(Rtemp, 0, eq);
634 __ b(skip_fixup, eq);
635 __ jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type, noreg, ne);
636
637 entry_address[AdapterBlob::C2I] = __ pc();
638 entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
639 gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
640 return;
641 }
642
643
644 static int reg2offset_in(VMReg r) {
645 // Account for saved FP and LR
646 return r->reg2stack() * VMRegImpl::stack_slot_size + 2*wordSize;
647 }
648
649 static int reg2offset_out(VMReg r) {
650 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
651 }
652
653
654 static void verify_oop_args(MacroAssembler* masm,
655 const methodHandle& method,
656 const BasicType* sig_bt,
657 const VMRegPair* regs) {
658 Register temp_reg = Rmethod; // not part of any compiled calling seq
659 if (VerifyOops) {
1826 __ reset_last_Java_frame(Rtemp);
1827
1828 __ raw_pop(R1, R2, LR);
1829 __ ret();
1830
1831 OopMapSet* oop_maps = new OopMapSet();
1832 OopMap* map = new OopMap(framesize, 1);
1833 oop_maps->add_gc_map(frame_complete, map);
1834
1835 RuntimeStub* stub =
1836 RuntimeStub::new_runtime_stub(name,
1837 &code,
1838 frame_complete,
1839 (framesize >> (LogBytesPerWord - LogBytesPerInt)),
1840 oop_maps,
1841 false);
1842 return stub;
1843 }
1844
1845 #endif // INCLUDE_JFR
|
1 /*
2 * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
447 Label skip;
448
449 __ ldr(Rtemp, Address(Rmethod, Method::code_offset()));
450 __ cbz(Rtemp, skip);
451
452 // Pushing an even number of registers for stack alignment.
453 // Selecting R9, which had to be saved anyway for some platforms.
454 __ push(RegisterSet(R0, R3) | R9 | LR);
455 __ fpush_hardfp(FloatRegisterSet(D0, 8));
456
457 __ mov(R0, Rmethod);
458 __ mov(R1, LR);
459 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
460
461 __ fpop_hardfp(FloatRegisterSet(D0, 8));
462 __ pop(RegisterSet(R0, R3) | R9 | LR);
463
464 __ bind(skip);
465 }
466
467 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, int comp_args_on_stack, const GrowableArray<SigEntry>* sig, const VMRegPair *regs) {
468
469 // TODO: ARM - May be can use ldm to load arguments
470 const Register tmp = Rtemp; // avoid erasing R5_mh
471
472 // Next assert may not be needed but safer. Extra analysis required
473 // if this there is not enough free registers and we need to use R5 here.
474 assert_different_registers(tmp, R5_mh);
475
476 // 6243940 We might end up in handle_wrong_method if
477 // the callee is deoptimized as we race thru here. If that
478 // happens we don't want to take a safepoint because the
479 // caller frame will look interpreted and arguments are now
480 // "compiled" so it is much better to make this transition
481 // invisible to the stack walking code. Unfortunately if
482 // we try and find the callee by normal means a safepoint
483 // is possible. So we stash the desired callee in the thread
484 // and the vm will find there should this case occur.
485 Address callee_target_addr(Rthread, JavaThread::callee_target_offset());
486 __ str(Rmethod, callee_target_addr);
487
488
489 assert_different_registers(tmp, R0, R1, R2, R3, Rsender_sp, Rmethod);
490
491 const Register initial_sp = Rmethod; // temporarily scratched
492
493 // Old code was modifying R4 but this looks unsafe (particularly with JSR292)
494 assert_different_registers(tmp, R0, R1, R2, R3, Rsender_sp, initial_sp);
495
496 __ mov(initial_sp, SP);
497
498 if (comp_args_on_stack) {
499 __ sub_slow(SP, SP, comp_args_on_stack * VMRegImpl::stack_slot_size);
500 }
501 __ bic(SP, SP, StackAlignmentInBytes - 1);
502
503 int total_args_passed = sig->length();
504 for (int i = 0; i < total_args_passed; i++) {
505 BasicType bt = sig->at(i)._bt;
506 if (bt == T_VOID) {
507 assert(i > 0 && (sig->at(i - 1)._bt == T_LONG || sig->at(i - 1)._bt == T_DOUBLE), "missing half");
508 continue;
509 }
510 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "must be ordered");
511 int arg_offset = Interpreter::expr_offset_in_bytes(total_args_passed - 1 - i);
512
513 VMReg r_1 = regs[i].first();
514 VMReg r_2 = regs[i].second();
515 if (r_1->is_stack()) {
516 int stack_offset = r_1->reg2stack() * VMRegImpl::stack_slot_size;
517 if (!r_2->is_valid()) {
518 __ ldr(tmp, Address(initial_sp, arg_offset));
519 __ str(tmp, Address(SP, stack_offset));
520 } else {
521 __ ldr(tmp, Address(initial_sp, arg_offset - Interpreter::stackElementSize));
522 __ str(tmp, Address(SP, stack_offset));
523 __ ldr(tmp, Address(initial_sp, arg_offset));
524 __ str(tmp, Address(SP, stack_offset + wordSize));
525 }
526 } else if (r_1->is_Register()) {
527 if (!r_2->is_valid()) {
533 } else if (r_1->is_FloatRegister()) {
534 #ifdef __SOFTFP__
535 ShouldNotReachHere();
536 #endif // __SOFTFP__
537 if (!r_2->is_valid()) {
538 __ flds(r_1->as_FloatRegister(), Address(initial_sp, arg_offset));
539 } else {
540 __ fldd(r_1->as_FloatRegister(), Address(initial_sp, arg_offset - Interpreter::stackElementSize));
541 }
542 } else {
543 assert(!r_1->is_valid() && !r_2->is_valid(), "must be");
544 }
545 }
546
547 // restore Rmethod (scratched for initial_sp)
548 __ ldr(Rmethod, callee_target_addr);
549 __ ldr(PC, Address(Rmethod, Method::from_compiled_offset()));
550
551 }
552
553 static void gen_c2i_adapter(MacroAssembler *masm, int comp_args_on_stack, const GrowableArray<SigEntry>* sig, const VMRegPair *regs,
554 Label& skip_fixup) {
555 // TODO: ARM - May be can use stm to deoptimize arguments
556 const Register tmp = Rtemp;
557
558 patch_callers_callsite(masm);
559 __ bind(skip_fixup);
560
561 __ mov(Rsender_sp, SP); // not yet saved
562
563
564 int total_args_passed = sig->length();
565 int extraspace = total_args_passed * Interpreter::stackElementSize;
566 if (extraspace) {
567 __ sub_slow(SP, SP, extraspace);
568 }
569
570 for (int i = 0; i < total_args_passed; i++) {
571 BasicType bt = sig->at(i)._bt;
572 if (bt == T_VOID) {
573 assert(i > 0 && (sig->at(i - 1)._bt == T_LONG || sig->at(i - 1)._bt == T_DOUBLE), "missing half");
574 continue;
575 }
576 int stack_offset = (total_args_passed - 1 - i) * Interpreter::stackElementSize;
577
578 VMReg r_1 = regs[i].first();
579 VMReg r_2 = regs[i].second();
580 if (r_1->is_stack()) {
581 int arg_offset = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
582 if (!r_2->is_valid()) {
583 __ ldr(tmp, Address(SP, arg_offset));
584 __ str(tmp, Address(SP, stack_offset));
585 } else {
586 __ ldr(tmp, Address(SP, arg_offset));
587 __ str(tmp, Address(SP, stack_offset - Interpreter::stackElementSize));
588 __ ldr(tmp, Address(SP, arg_offset + wordSize));
589 __ str(tmp, Address(SP, stack_offset));
590 }
591 } else if (r_1->is_Register()) {
592 if (!r_2->is_valid()) {
593 __ str(r_1->as_Register(), Address(SP, stack_offset));
596 __ str(r_2->as_Register(), Address(SP, stack_offset));
597 }
598 } else if (r_1->is_FloatRegister()) {
599 #ifdef __SOFTFP__
600 ShouldNotReachHere();
601 #endif // __SOFTFP__
602 if (!r_2->is_valid()) {
603 __ fsts(r_1->as_FloatRegister(), Address(SP, stack_offset));
604 } else {
605 __ fstd(r_1->as_FloatRegister(), Address(SP, stack_offset - Interpreter::stackElementSize));
606 }
607 } else {
608 assert(!r_1->is_valid() && !r_2->is_valid(), "must be");
609 }
610 }
611
612 __ ldr(PC, Address(Rmethod, Method::interpreter_entry_offset()));
613
614 }
615
616 void SharedRuntime::generate_i2c2i_adapters(MacroAssembler* masm,
617 int comp_args_on_stack,
618 const GrowableArray<SigEntry>* sig,
619 const VMRegPair* regs,
620 const GrowableArray<SigEntry>* sig_cc,
621 const VMRegPair* regs_cc,
622 const GrowableArray<SigEntry>* sig_cc_ro,
623 const VMRegPair* regs_cc_ro,
624 address entry_address[AdapterBlob::ENTRY_COUNT],
625 AdapterBlob*& new_adapter,
626 bool allocate_code_blob) {
627
628 entry_address[AdapterBlob::I2C] = __ pc();
629 gen_i2c_adapter(masm, comp_args_on_stack, sig, regs);
630
631 entry_address[AdapterBlob::C2I_Unverified] = __ pc();
632 Label skip_fixup;
633 const Register receiver = R0;
634 const Register holder_klass = Rtemp; // XXX should be OK for C2 but not 100% sure
635
636 __ ic_check(1 /* end_alignment */);
637 __ ldr(Rmethod, Address(Ricklass, CompiledICData::speculated_method_offset()));
638
639 __ ldr(Rtemp, Address(Rmethod, Method::code_offset()), eq);
640 __ cmp(Rtemp, 0, eq);
641 __ b(skip_fixup, eq);
642 __ jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type, noreg, ne);
643
644 entry_address[AdapterBlob::C2I] = __ pc();
645 entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
646 gen_c2i_adapter(masm, comp_args_on_stack, sig, regs, skip_fixup);
647 return;
648 }
649
650
651 static int reg2offset_in(VMReg r) {
652 // Account for saved FP and LR
653 return r->reg2stack() * VMRegImpl::stack_slot_size + 2*wordSize;
654 }
655
656 static int reg2offset_out(VMReg r) {
657 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
658 }
659
660
661 static void verify_oop_args(MacroAssembler* masm,
662 const methodHandle& method,
663 const BasicType* sig_bt,
664 const VMRegPair* regs) {
665 Register temp_reg = Rmethod; // not part of any compiled calling seq
666 if (VerifyOops) {
1833 __ reset_last_Java_frame(Rtemp);
1834
1835 __ raw_pop(R1, R2, LR);
1836 __ ret();
1837
1838 OopMapSet* oop_maps = new OopMapSet();
1839 OopMap* map = new OopMap(framesize, 1);
1840 oop_maps->add_gc_map(frame_complete, map);
1841
1842 RuntimeStub* stub =
1843 RuntimeStub::new_runtime_stub(name,
1844 &code,
1845 frame_complete,
1846 (framesize >> (LogBytesPerWord - LogBytesPerInt)),
1847 oop_maps,
1848 false);
1849 return stub;
1850 }
1851
1852 #endif // INCLUDE_JFR
1853
// Maximum number of integer/float registers used by the Java return
// convention on this platform. Both are 0 because multi-register
// (scalarized) returns are not implemented here — java_return_convention
// below is Unimplemented().
const uint SharedRuntime::java_return_convention_max_int = 0; // Argument::n_int_register_parameters_j;
const uint SharedRuntime::java_return_convention_max_float = 0; // Argument::n_float_register_parameters_j;
1856
// Computes the register assignment for a multi-value (scalarized) Java
// return. Not supported on this platform: reaching here is a fatal error
// via Unimplemented().
int SharedRuntime::java_return_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed) {
  Unimplemented();
  return 0; // not reached
}
1861
// Presumably builds the adapter that re-buffers a scalarized inline type
// for the given klass (see shared SharedRuntime declaration — TODO confirm).
// Inline types are not supported on this platform: reaching here is a fatal
// error via Unimplemented().
BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(const InlineKlass* vk) {
  Unimplemented();
  return nullptr; // not reached
}
|