650 break;
651
case new_instance_id:
case fast_new_instance_id:
case fast_new_instance_init_check_id:
  {
    // Allocate a new (non-array) instance.  All three stub ids share the
    // same slow path -- a runtime call to new_instance(klass) -- and
    // differ only in the debug name recorded for the stub.
    Register klass = r3; // Incoming
    Register obj = r0; // Result

    if (id == new_instance_id) {
      __ set_info("new_instance", dont_gc_arguments);
    } else if (id == fast_new_instance_id) {
      __ set_info("fast new_instance", dont_gc_arguments);
    } else {
      assert(id == fast_new_instance_init_check_id, "bad StubID");
      __ set_info("fast new_instance init check", dont_gc_arguments);
    }

    __ enter();
    // Record an oop map so the GC can find/patch live registers across
    // the runtime call that may allocate (and thus may GC).
    OopMap* map = save_live_registers(sasm);
    int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
    oop_maps = new OopMapSet();
    oop_maps->add_gc_map(call_offset, map);
    // Keep r0 untouched: it carries the freshly allocated object.
    restore_live_registers_except_r0(sasm);
    __ verify_oop(obj);
    __ leave();
    __ ret(lr);

    // r0,: new instance
  }

  break;
682
case counter_overflow_id:
  {
    // Invocation/backedge profile counter overflowed: notify the runtime
    // via counter_overflow(bci, method) so compilation policy can react.
    Register bci = r0, method = r1;
    __ enter();
    OopMap* map = save_live_registers(sasm);
    // Retrieve bci (pushed by the caller into its frame; exact slot
    // layout assumed from the rfp offsets below -- TODO confirm at the
    // call site)
    __ ldrw(bci, Address(rfp, 2*BytesPerWord));
    // And a pointer to the Method*
    __ ldr(method, Address(rfp, 3*BytesPerWord));
    int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
    oop_maps = new OopMapSet();
    oop_maps->add_gc_map(call_offset, map);
    // No result to preserve, so restore everything.
    restore_live_registers(sasm);
    __ leave();
    __ ret(lr);
  }
  break;
700
case new_type_array_id:
case new_object_array_id:
  {
    // Allocate a primitive-typed or object array of the given length.
    Register length = r19; // Incoming
    Register klass = r3; // Incoming
    Register obj = r0; // Result

    if (id == new_type_array_id) {
      __ set_info("new_type_array", dont_gc_arguments);
    } else {
      __ set_info("new_object_array", dont_gc_arguments);
    }

#ifdef ASSERT
    // assert object type is really an array of the proper kind:
    // check the array tag bits of the klass' layout helper.
    {
      Label ok;
      Register t0 = obj;  // the result register is free until the call below
      __ ldrw(t0, Address(klass, Klass::layout_helper_offset()));
      __ asrw(t0, t0, Klass::_lh_array_tag_shift);
      int tag = ((id == new_type_array_id)
                 ? Klass::_lh_array_tag_type_value
                 : Klass::_lh_array_tag_obj_value);
      __ mov(rscratch1, tag);
      __ cmpw(t0, rscratch1);
      __ br(Assembler::EQ, ok);
      __ stop("assert(is an array klass)");
      __ should_not_reach_here();
      __ bind(ok);
    }
#endif // ASSERT

    __ enter();
    // Oop map so GC can process live registers across the allocating call.
    OopMap* map = save_live_registers(sasm);
    int call_offset;
    if (id == new_type_array_id) {
      call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
    } else {
      call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
    }

    oop_maps = new OopMapSet();
    oop_maps->add_gc_map(call_offset, map);
    // r0 carries the new array; exclude it from the restore.
    restore_live_registers_except_r0(sasm);

    __ verify_oop(obj);
    __ leave();
    __ ret(lr);

    // r0: new array
  }
  break;
753
case new_multi_array_id:
  { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
    // r0,: klass
    // r19,: rank
    // r2: address of 1st dimension
    OopMap* map = save_live_registers(sasm);
    // Shuffle the incoming values into the argument registers the call
    // below reads (r1/r2/r3).  r2 must be copied out (into c_rarg3)
    // BEFORE it is overwritten with the rank.
    __ mov(c_rarg1, r0);
    __ mov(c_rarg3, r2);
    __ mov(c_rarg2, r19);
    int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), r1, r2, r3);

    oop_maps = new OopMapSet();
    oop_maps->add_gc_map(call_offset, map);
    // Keep r0: it holds the new multi-dimensional array.
    restore_live_registers_except_r0(sasm);

    // r0,: new multi array
    __ verify_oop(r0);
  }
  break;
773
case register_finalizer_id:
  {
    // Register an object with a finalizer for later finalization.
    __ set_info("register_finalizer", dont_gc_arguments);

    // This is called via call_runtime so the arguments
    // will be placed in C abi locations
    __ verify_oop(c_rarg0);

    // Fast path: load the klass and check the has-finalizer access flag;
    // if clear, there is nothing to register and we return immediately.
    Label register_finalizer;
    Register t = r5;
    __ load_klass(t, r0);
    __ ldrw(t, Address(t, Klass::access_flags_offset()));
    __ tbnz(t, exact_log2(JVM_ACC_HAS_FINALIZER), register_finalizer);
    __ ret(lr);

    // Slow path: call into the VM to register r0 for finalization.
    __ bind(register_finalizer);
    __ enter();
    OopMap* oop_map = save_live_registers(sasm);
    int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), r0);
    oop_maps = new OopMapSet();
    oop_maps->add_gc_map(call_offset, oop_map);

    // Now restore all the live registers
    restore_live_registers(sasm);

    __ leave();
    __ ret(lr);
  }
  break;
805
case throw_class_cast_exception_id:
  { // Throws ClassCastException; the stub never returns to its caller.
    StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments, does_not_return);
    oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
  }
  break;
811
812 case throw_incompatible_class_change_error_id:
813 { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments, does_not_return);
814 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
815 }
816 break;
817
818 case slow_subtype_check_id:
819 {
820 // Typical calling sequence:
821 // __ push(klass_RInfo); // object klass or other subclass
822 // __ push(sup_k_RInfo); // array element klass or other superclass
823 // __ bl(slow_subtype_check);
824 // Note that the subclass is pushed first, and is therefore deepest.
825 enum layout {
826 r0_off, r0_off_hi,
827 r2_off, r2_off_hi,
828 r4_off, r4_off_hi,
829 r5_off, r5_off_hi,
830 sup_k_off, sup_k_off_hi,
831 klass_off, klass_off_hi,
832 framesize,
833 result_off = sup_k_off
834 };
835
836 __ set_info("slow_subtype_check", dont_gc_arguments);
837 __ push(RegSet::of(r0, r2, r4, r5), sp);
1001 __ leave();
1002 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
1003 assert(deopt_blob != nullptr, "deoptimization blob must have been created");
1004
1005 __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
1006 }
1007 break;
1008
case dtrace_object_alloc_id:
  { // c_rarg0: object
    // Fires the dtrace object-allocation probe via a leaf call (no oop
    // map needed for a leaf -- it cannot trigger a GC or deopt).
    StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
    save_live_registers(sasm);

    __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), c_rarg0);

    restore_live_registers(sasm);
  }
  break;
1019
default:
  { // Any unhandled stub id falls into a stub that traps in the runtime
    // with unimplemented_entry(id).
    StubFrame f(sasm, "unimplemented entry", dont_gc_arguments, does_not_return);
    __ mov(r0, (int)id);
    __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
  }
  break;
1026 }
1027 }
1028 return oop_maps;
1029 }
1030
1031 #undef __
1032
1033 const char *Runtime1::pd_name_for_address(address entry) { Unimplemented(); return 0; }
|
650 break;
651
652 case new_instance_id:
653 case fast_new_instance_id:
654 case fast_new_instance_init_check_id:
655 {
656 Register klass = r3; // Incoming
657 Register obj = r0; // Result
658
659 if (id == new_instance_id) {
660 __ set_info("new_instance", dont_gc_arguments);
661 } else if (id == fast_new_instance_id) {
662 __ set_info("fast new_instance", dont_gc_arguments);
663 } else {
664 assert(id == fast_new_instance_init_check_id, "bad StubID");
665 __ set_info("fast new_instance init check", dont_gc_arguments);
666 }
667
668 __ enter();
669 OopMap* map = save_live_registers(sasm);
670 int call_offset;
671 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
672 oop_maps = new OopMapSet();
673 oop_maps->add_gc_map(call_offset, map);
674 restore_live_registers_except_r0(sasm);
675 __ verify_oop(obj);
676 __ leave();
677 __ ret(lr);
678
679 // r0,: new instance
680 }
681
682 break;
683
case counter_overflow_id:
  {
    // Invocation/backedge profile counter overflowed: notify the runtime
    // via counter_overflow(bci, method) so compilation policy can react.
    Register bci = r0, method = r1;
    __ enter();
    OopMap* map = save_live_registers(sasm);
    // Retrieve bci (pushed by the caller into its frame; exact slot
    // layout assumed from the rfp offsets below -- TODO confirm at the
    // call site)
    __ ldrw(bci, Address(rfp, 2*BytesPerWord));
    // And a pointer to the Method*
    __ ldr(method, Address(rfp, 3*BytesPerWord));
    int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
    oop_maps = new OopMapSet();
    oop_maps->add_gc_map(call_offset, map);
    // No result to preserve, so restore everything.
    restore_live_registers(sasm);
    __ leave();
    __ ret(lr);
  }
  break;
701
case new_type_array_id:
case new_object_array_id:
case new_flat_array_id:
  {
    // Allocate a primitive-typed, object, or flat (inline-type) array of
    // the given length.
    Register length = r19; // Incoming
    Register klass = r3; // Incoming
    Register obj = r0; // Result

    if (id == new_type_array_id) {
      __ set_info("new_type_array", dont_gc_arguments);
    } else if (id == new_object_array_id) {
      __ set_info("new_object_array", dont_gc_arguments);
    } else {
      __ set_info("new_flat_array", dont_gc_arguments);
    }

#ifdef ASSERT
    // assert object type is really an array of the proper kind:
    // check the array tag bits of the klass' layout helper.
    {
      Label ok;
      Register t0 = obj;  // the result register is free until the call below
      __ ldrw(t0, Address(klass, Klass::layout_helper_offset()));
      __ asrw(t0, t0, Klass::_lh_array_tag_shift);
      switch (id) {
      case new_type_array_id:
        __ cmpw(t0, Klass::_lh_array_tag_type_value);
        __ br(Assembler::EQ, ok);
        __ stop("assert(is a type array klass)");
        break;
      case new_object_array_id:
        // An object-array request may be satisfied by either tag.
        __ cmpw(t0, Klass::_lh_array_tag_obj_value); // new "[Ljava/lang/Object;"
        __ br(Assembler::EQ, ok);
        __ cmpw(t0, Klass::_lh_array_tag_vt_value); // new "[LVT;"
        __ br(Assembler::EQ, ok);
        __ stop("assert(is an object or inline type array klass)");
        break;
      case new_flat_array_id:
        // TODO 8325106 Fix comment
        // new "[QVT;"
        __ cmpw(t0, Klass::_lh_array_tag_vt_value); // the array can be a flat array.
        __ br(Assembler::EQ, ok);
        __ cmpw(t0, Klass::_lh_array_tag_obj_value); // the array cannot be a flat array (due to InlineArrayElementMaxFlatSize, etc)
        __ br(Assembler::EQ, ok);
        __ stop("assert(is an object or inline type array klass)");
        break;
      default: ShouldNotReachHere();
      }
      __ should_not_reach_here();
      __ bind(ok);
    }
#endif // ASSERT

    __ enter();
    // Oop map so GC can process live registers across the allocating call.
    OopMap* map = save_live_registers(sasm);
    int call_offset;
    if (id == new_type_array_id) {
      call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
    } else if (id == new_object_array_id) {
      call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
    } else {
      assert(id == new_flat_array_id, "must be");
      call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_flat_array), klass, length);
    }

    oop_maps = new OopMapSet();
    oop_maps->add_gc_map(call_offset, map);
    // r0 carries the new array; exclude it from the restore.
    restore_live_registers_except_r0(sasm);

    __ verify_oop(obj);
    __ leave();
    __ ret(lr);

    // r0: new array
  }
  break;
777
case new_multi_array_id:
  { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
    // r0,: klass
    // r19,: rank
    // r2: address of 1st dimension
    OopMap* map = save_live_registers(sasm);
    // Shuffle the incoming values into the argument registers the call
    // below reads (r1/r2/r3).  r2 must be copied out (into c_rarg3)
    // BEFORE it is overwritten with the rank.
    __ mov(c_rarg1, r0);
    __ mov(c_rarg3, r2);
    __ mov(c_rarg2, r19);
    int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), r1, r2, r3);

    oop_maps = new OopMapSet();
    oop_maps->add_gc_map(call_offset, map);
    // Keep r0: it holds the new multi-dimensional array.
    restore_live_registers_except_r0(sasm);

    // r0,: new multi array
    __ verify_oop(r0);
  }
  break;
797
case buffer_inline_args_id:
case buffer_inline_args_no_receiver_id:
  {
    // Buffer the scalarized inline-type arguments of a C1 method into
    // heap objects; returns an array of the buffered values in r20.
    const char* name = (id == buffer_inline_args_id) ?
      "buffer_inline_args" : "buffer_inline_args_no_receiver";
    StubFrame f(sasm, name, dont_gc_arguments);
    OopMap* map = save_live_registers(sasm);
    Register method = r19; // Incoming
    address entry = (id == buffer_inline_args_id) ?
      CAST_FROM_FN_PTR(address, buffer_inline_args) :
      CAST_FROM_FN_PTR(address, buffer_inline_args_no_receiver);
    // This is called from a C1 method's scalarized entry point
    // where r0-r7 may be holding live argument values so we can't
    // return the result in r0 as the other stubs do. LR is used as
    // a temporary below to avoid the result being clobbered by
    // restore_live_registers.
    int call_offset = __ call_RT(lr, noreg, entry, method);
    oop_maps = new OopMapSet();
    oop_maps->add_gc_map(call_offset, map);
    restore_live_registers(sasm);
    // Move the result out of LR now that the restore is done.
    __ mov(r20, lr);
    __ verify_oop(r20); // r20: an array of buffered value objects
  }
  break;
822
case load_flat_array_id:
  {
    // Load element array[index] out of a flat array, buffering it into a
    // heap object returned in r0.
    StubFrame f(sasm, "load_flat_array", dont_gc_arguments);
    OopMap* map = save_live_registers(sasm);

    // Called with store_parameter and not C abi

    f.load_argument(1, r0); // r0,: array
    f.load_argument(0, r1); // r1,: index
    int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, load_flat_array), r0, r1);

    // Ensure the stores that initialize the buffer are visible
    // before any subsequent store that publishes this reference.
    __ membar(Assembler::StoreStore);

    oop_maps = new OopMapSet();
    oop_maps->add_gc_map(call_offset, map);
    // Keep r0: it holds the loaded (buffered) element.
    restore_live_registers_except_r0(sasm);

    // r0: loaded element at array[index]
    __ verify_oop(r0);
  }
  break;
846
case store_flat_array_id:
  {
    // Store a value object into a flat array: array[index] = value.
    StubFrame f(sasm, "store_flat_array", dont_gc_arguments);
    // NOTE(review): the extra argument 4 presumably reserves additional
    // outgoing slots in the save area -- confirm against the definition
    // of save_live_registers.
    OopMap* map = save_live_registers(sasm, 4);

    // Called with store_parameter and not C abi

    f.load_argument(2, r0); // r0: array
    f.load_argument(1, r1); // r1: index
    f.load_argument(0, r2); // r2: value
    int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, store_flat_array), r0, r1, r2);

    oop_maps = new OopMapSet();
    oop_maps->add_gc_map(call_offset, map);
    // NOTE(review): no result is produced here (call_RT result register
    // is noreg), yet r0 is excluded from the restore -- confirm this is
    // intentional.
    restore_live_registers_except_r0(sasm);
  }
  break;
864
case substitutability_check_id:
  {
    // Deep "substitutability" (==) comparison of two value objects;
    // the boolean answer presumably comes back in r0 (see the trailing
    // comment) -- confirm against the substitutability_check runtime entry.
    StubFrame f(sasm, "substitutability_check", dont_gc_arguments);
    OopMap* map = save_live_registers(sasm);

    // Called with store_parameter and not C abi

    f.load_argument(1, r1); // r1,: left
    f.load_argument(0, r2); // r2,: right
    int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, substitutability_check), r1, r2);

    oop_maps = new OopMapSet();
    oop_maps->add_gc_map(call_offset, map);
    restore_live_registers_except_r0(sasm);

    // r0,: are the two operands substitutable
  }
  break;
883
case register_finalizer_id:
  {
    // Register an object with a finalizer for later finalization.
    __ set_info("register_finalizer", dont_gc_arguments);

    // This is called via call_runtime so the arguments
    // will be placed in C abi locations
    __ verify_oop(c_rarg0);

    // Fast path: load the klass and check the has-finalizer access flag;
    // if clear, there is nothing to register and we return immediately.
    Label register_finalizer;
    Register t = r5;
    __ load_klass(t, r0);
    __ ldrw(t, Address(t, Klass::access_flags_offset()));
    __ tbnz(t, exact_log2(JVM_ACC_HAS_FINALIZER), register_finalizer);
    __ ret(lr);

    // Slow path: call into the VM to register r0 for finalization.
    __ bind(register_finalizer);
    __ enter();
    OopMap* oop_map = save_live_registers(sasm);
    int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), r0);
    oop_maps = new OopMapSet();
    oop_maps->add_gc_map(call_offset, oop_map);

    // Now restore all the live registers
    restore_live_registers(sasm);

    __ leave();
    __ ret(lr);
  }
  break;
915
case throw_class_cast_exception_id:
  { // Throws ClassCastException; the stub never returns to its caller.
    StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments, does_not_return);
    oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
  }
  break;
921
case throw_incompatible_class_change_error_id:
  { // Throws IncompatibleClassChangeError; the stub never returns.
    StubFrame f(sasm, "throw_incompatible_class_change_error", dont_gc_arguments, does_not_return);
    oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
  }
  break;
927
case throw_illegal_monitor_state_exception_id:
  { // Throws IllegalMonitorStateException.
    // NOTE(review): unlike the sibling throw_* stubs, this StubFrame is
    // not marked does_not_return -- confirm whether that is intentional.
    StubFrame f(sasm, "throw_illegal_monitor_state_exception", dont_gc_arguments);
    oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_illegal_monitor_state_exception), false);
  }
  break;
933
934 case slow_subtype_check_id:
935 {
936 // Typical calling sequence:
937 // __ push(klass_RInfo); // object klass or other subclass
938 // __ push(sup_k_RInfo); // array element klass or other superclass
939 // __ bl(slow_subtype_check);
940 // Note that the subclass is pushed first, and is therefore deepest.
941 enum layout {
942 r0_off, r0_off_hi,
943 r2_off, r2_off_hi,
944 r4_off, r4_off_hi,
945 r5_off, r5_off_hi,
946 sup_k_off, sup_k_off_hi,
947 klass_off, klass_off_hi,
948 framesize,
949 result_off = sup_k_off
950 };
951
952 __ set_info("slow_subtype_check", dont_gc_arguments);
953 __ push(RegSet::of(r0, r2, r4, r5), sp);
1117 __ leave();
1118 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
1119 assert(deopt_blob != nullptr, "deoptimization blob must have been created");
1120
1121 __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
1122 }
1123 break;
1124
case dtrace_object_alloc_id:
  { // c_rarg0: object
    // Fires the dtrace object-allocation probe via a leaf call (no oop
    // map needed for a leaf -- it cannot trigger a GC or deopt).
    StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
    save_live_registers(sasm);

    __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), c_rarg0);

    restore_live_registers(sasm);
  }
  break;
1135
default:
  // FIXME: For unhandled trap_id this code fails with assert during vm intialization
  // rather than insert a call to unimplemented_entry
  { // Any unhandled stub id falls into a stub that traps in the runtime
    // with unimplemented_entry(id).
    StubFrame f(sasm, "unimplemented entry", dont_gc_arguments, does_not_return);
    __ mov(r0, (int)id);
    __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
  }
  break;
1144 }
1145 }
1146
1147
1148 return oop_maps;
1149 }
1150
1151 #undef __
1152
1153 const char *Runtime1::pd_name_for_address(address entry) { Unimplemented(); return 0; }
|