      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(lr);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

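    // Instance allocation stubs. The klass arrives in r3 and the new object
    // is returned in r0. The two "fast" variants may try an inline contiguous
    // (eden) allocation before falling back to the runtime call;
    // new_instance_no_inline_id differs from new_instance_id only in the
    // runtime entry it calls.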
    case new_instance_id:
    case new_instance_no_inline_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = r3; // Incoming
        Register obj   = r0; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == new_instance_no_inline_id) {
          __ set_info("new_instance_no_inline", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        // If TLAB is disabled, see if there is support for inlining contiguous
        // allocations.
        // Otherwise, just go to the slow path.
        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            !UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
          Label slow_path;
          Register obj_size = r19;
          Register t1       = r10;
          Register t2       = r11;
          assert_different_registers(klass, obj, obj_size, t1, t2);

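          // r19 is callee-saved, so preserve it across the fast path; pairing
          // it with zr keeps sp 16-byte aligned.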
          __ stp(r19, zr, Address(__ pre(sp, -2 * wordSize)));

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ ldrb(rscratch1, Address(klass, InstanceKlass::init_state_offset()));
            __ cmpw(rscratch1, InstanceKlass::fully_initialized);
            __ br(Assembler::NE, slow_path);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));
            __ cmp(obj_size, (u1)0);
            __ br(Assembler::LE, not_ok);  // make sure it's an instance (LH > 0)
            __ tstw(obj_size, Klass::_lh_instance_slow_path_bit);
            __ br(Assembler::EQ, ok);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // get the instance size (size is positive, so the 32-bit ldrw does the right thing on 64 bit)
          __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ eden_allocate(obj, obj_size, 0, t1, slow_path);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
          __ verify_oop(obj);
          __ ldp(r19, zr, Address(__ post(sp, 2 * wordSize)));
          __ ret(lr);

          __ bind(slow_path);
          __ ldp(r19, zr, Address(__ post(sp, 2 * wordSize)));
        }

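        // Slow path: build a frame, save the live registers, and let the
        // runtime do the allocation. The GC map recorded at the call site
        // allows a GC triggered inside the runtime call to walk this frame.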
        __ enter();
        OopMap* map = save_live_registers(sasm);
        int call_offset;
        if (id == new_instance_no_inline_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance_no_inline), klass);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        }
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(lr);

        // r0: new instance
      }
      break;

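    // counter_overflow is entered when an invocation or backedge counter in
    // C1-compiled code overflows. The bci and the Method* are picked out of
    // the caller's frame, where the compiled code stored them.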
    case counter_overflow_id:
      {
        Register bci = r0, method = r1;
        __ enter();
        OopMap* map = save_live_registers(sasm);
        // Retrieve bci
        __ ldrw(bci, Address(rfp, 2*BytesPerWord));
        // And a pointer to the Method*
        __ ldr(method, Address(rfp, 3*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(lr);
      }
      break;

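    // Array allocation stubs: primitive arrays, object arrays and (for
    // Valhalla) flat inline-type arrays. The length arrives in r19, the
    // array klass in r3, and the new array is returned in r0.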
    case new_type_array_id:
    case new_object_array_id:
    case new_flat_array_id:
      {
        Register length = r19; // Incoming
        Register klass  = r3;  // Incoming
        Register obj    = r0;  // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else if (id == new_object_array_id) {
          __ set_info("new_object_array", dont_gc_arguments);
        } else {
          __ set_info("new_flat_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ ldrw(t0, Address(klass, Klass::layout_helper_offset()));
          __ asrw(t0, t0, Klass::_lh_array_tag_shift);
          switch (id) {
          case new_type_array_id:
            __ cmpw(t0, Klass::_lh_array_tag_type_value);
            __ br(Assembler::EQ, ok);
            __ stop("assert(is a type array klass)");
            break;
          case new_object_array_id:
            __ cmpw(t0, Klass::_lh_array_tag_obj_value); // new "[Ljava/lang/Object;"
            __ br(Assembler::EQ, ok);
            __ cmpw(t0, Klass::_lh_array_tag_vt_value);  // new "[LVT;"
            __ br(Assembler::EQ, ok);
            __ stop("assert(is an object or inline type array klass)");
            break;
          case new_flat_array_id:
            // new "[QVT;"
            __ cmpw(t0, Klass::_lh_array_tag_vt_value);  // the array can be flattened
            __ br(Assembler::EQ, ok);
            __ cmpw(t0, Klass::_lh_array_tag_obj_value); // the array cannot be flattened (due to InlineArrayElementMaxFlatSize, etc.)
            __ br(Assembler::EQ, ok);
            __ stop("assert(is an object or inline type array klass)");
            break;
          default: ShouldNotReachHere();
          }
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        // If TLAB is disabled, see if there is support for inlining contiguous
        // allocations.
        // Otherwise, just go to the slow path.
        if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
          Register arr_size = r5;
          Register t1       = r10;
          Register t2       = r11;
          Label slow_path;
          assert_different_registers(length, klass, obj, arr_size, t1, t2);

          // check that array length is small enough for fast path.
          __ mov(rscratch1, C1_MacroAssembler::max_array_allocation_length);
          __ cmpw(length, rscratch1);
          __ br(Assembler::HI, slow_path);

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive ldrw does right thing on 64bit
          __ ldrw(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movw does right thing on 64bit
          __ movw(arr_size, length);
          __ lslvw(arr_size, length, t1);
          __ ubfx(t1, t1, Klass::_lh_header_size_shift,
                  exact_log2(Klass::_lh_header_size_mask + 1));
          __ add(arr_size, arr_size, t1);
          __ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
          __ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);

          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andr(t1, t1, Klass::_lh_header_size_mask);
          __ sub(arr_size, arr_size, t1); // body length
          __ add(t1, t1, obj);            // body start
          __ initialize_body(t1, arr_size, 0, t1, t2);
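          // StoreStore barrier: order the initializing stores above before
          // any subsequent store that publishes the new array.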
          __ membar(Assembler::StoreStore);
          __ verify_oop(obj);

          __ ret(lr);

          __ bind(slow_path);
        }

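        // Slow path: hand the allocation to the runtime entry matching the
        // stub kind.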
        __ enter();
        OopMap* map = save_live_registers(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else if (id == new_object_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        } else {
          assert(id == new_flat_array_id, "must be");
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_flat_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(lr);

        // r0: new array
      }
      break;

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // r0: klass
        // r19: rank
        // r2: address of 1st dimension
        OopMap* map = save_live_registers(sasm);
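        // Shuffle the incoming values into the C ABI argument registers;
        // c_rarg3 (r3) must be written from r2 before c_rarg2 (r2) is
        // overwritten with the rank.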
        __ mov(c_rarg1, r0);
        __ mov(c_rarg3, r2);
        __ mov(c_rarg2, r19);
        int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), r1, r2, r3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        // r0: new multi array
        __ verify_oop(r0);
      }
      break;

    case buffer_inline_args_id:
    case buffer_inline_args_no_receiver_id:
      {
        const char* name = (id == buffer_inline_args_id) ?
          "buffer_inline_args" : "buffer_inline_args_no_receiver";
        StubFrame f(sasm, name, dont_gc_arguments);
        OopMap* map = save_live_registers(sasm);
        Register method = r19; // Incoming
        address entry = (id == buffer_inline_args_id) ?
          CAST_FROM_FN_PTR(address, buffer_inline_args) :
          CAST_FROM_FN_PTR(address, buffer_inline_args_no_receiver);
        // This is called from a C1 method's scalarized entry point
        // where r0-r7 may be holding live argument values so we can't
        // return the result in r0 as the other stubs do. LR is used as
        // a temporary below to avoid the result being clobbered by
        // restore_live_registers.
        int call_offset = __ call_RT(lr, noreg, entry, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ mov(r20, lr);
        __ verify_oop(r20); // r20: an array of buffered value objects
      }
      break;

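    // Runtime access to flat (flattened) inline-type arrays: a load buffers
    // the element into a new heap object, a store copies the value back into
    // the flat element storage.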
    case load_flattened_array_id:
      {
        StubFrame f(sasm, "load_flattened_array", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm);

        // Called with store_parameter and not C ABI
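        // (the compiled caller spilled the arguments into its frame with
        // store_parameter; StubFrame::load_argument picks them back up)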

        f.load_argument(1, r0); // r0: array
        f.load_argument(0, r1); // r1: index
        int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, load_flattened_array), r0, r1);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        // r0: loaded element at array[index]
        __ verify_oop(r0);
      }
      break;

    case store_flattened_array_id:
      {
        StubFrame f(sasm, "store_flattened_array", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 4);

        // Called with store_parameter and not C ABI

        f.load_argument(2, r0); // r0: array
        f.load_argument(1, r1); // r1: index
        f.load_argument(0, r2); // r2: value
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, store_flattened_array), r0, r1, r2);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);
      }
      break;

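    // substitutability_check implements the inline-type aware equality test:
    // the runtime decides whether the two operands are substitutable and the
    // answer comes back in r0.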
    case substitutability_check_id:
      {
        StubFrame f(sasm, "substitutability_check", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm);

        // Called with store_parameter and not C ABI

        f.load_argument(1, r1); // r1: left
        f.load_argument(0, r2); // r2: right
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, substitutability_check), r1, r2);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        // r0: are the two operands substitutable
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime, so the arguments
        // will be placed in C ABI locations

        __ verify_oop(c_rarg0);

        // load the klass and check the has-finalizer flag
        Label register_finalizer;
        Register t = r5;
        __ load_klass(t, r0);
        __ ldrw(t, Address(t, Klass::access_flags_offset()));
        __ tbnz(t, exact_log2(JVM_ACC_HAS_FINALIZER), register_finalizer);
        __ ret(lr);

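        // Only objects whose klass has the finalizer flag set get here;
        // everything else returned above without building a frame.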
        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), r0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(lr);
      }
      break;

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_change_error", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case throw_illegal_monitor_state_exception_id:
      { StubFrame f(sasm, "throw_illegal_monitor_state_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_illegal_monitor_state_exception), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
        // __ push(sup_k_RInfo);  // array element klass or other superclass
        // __ bl(slow_subtype_check);
        // Note that the subclass is pushed first, and is therefore deepest.
        enum layout {
          r0_off, r0_off_hi,
          r2_off, r2_off_hi,
          r4_off, r4_off_hi,
          r5_off, r5_off_hi,
          sup_k_off, sup_k_off_hi,
          klass_off, klass_off_hi,
          framesize,
          result_off = sup_k_off
        };

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push(RegSet::of(r0, r2, r4, r5), sp);
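        // ... (remainder of slow_subtype_check and the following stub cases
        // elided; the code below is the tail of a deoptimizing trap stub) ...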
        __ leave();
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");

        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case dtrace_object_alloc_id:
      { // c_rarg0: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
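        // The dtrace hook is a C function that may clobber caller-saved
        // registers, so save and restore all of them around the leaf call.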
        save_live_registers(sasm);

        __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), c_rarg0);

        restore_live_registers(sasm);
      }
      break;

    default:
      // FIXME: for an unhandled trap_id this code fails with an assert during
      // VM initialization rather than inserting a call to unimplemented_entry
      { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments, does_not_return);
        __ mov(r0, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
      }
      break;
    }
  }

  return oop_maps;
}

#undef __

const char *Runtime1::pd_name_for_address(address entry) { Unimplemented(); return 0; }