src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp

 624       {
 625         oop_maps = generate_handle_exception(id, sasm);
 626         __ leave();
 627         __ ret(lr);
 628       }
 629       break;
 630 
 631     case throw_div0_exception_id:
 632       { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments, does_not_return);
 633         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
 634       }
 635       break;
 636 
 637     case throw_null_pointer_exception_id:
 638       { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments, does_not_return);
 639         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
 640       }
 641       break;
 642 
 643     case new_instance_id:
 644     case fast_new_instance_id:
 645     case fast_new_instance_init_check_id:
 646       {
 647         Register klass = r3; // Incoming
 648         Register obj   = r0; // Result
 649 
 650         if (id == new_instance_id) {
 651           __ set_info("new_instance", dont_gc_arguments);
 652         } else if (id == fast_new_instance_id) {
 653           __ set_info("fast new_instance", dont_gc_arguments);
 654         } else {
 655           assert(id == fast_new_instance_init_check_id, "bad StubID");
 656           __ set_info("fast new_instance init check", dont_gc_arguments);
 657         }
 658 
 659         __ enter();
 660         OopMap* map = save_live_registers(sasm);
 661         int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
 662         oop_maps = new OopMapSet();
 663         oop_maps->add_gc_map(call_offset, map);
 664         restore_live_registers_except_r0(sasm);
 665         __ verify_oop(obj);
 666         __ leave();
 667         __ ret(lr);
 668 
 669         // r0: new instance
 670       }
 671 
 672       break;
 673 
 674     case counter_overflow_id:
 675       {
 676         Register bci = r0, method = r1;
 677         __ enter();
 678         OopMap* map = save_live_registers(sasm);
 679         // Retrieve bci
 680         __ ldrw(bci, Address(rfp, 2*BytesPerWord));
 681         // And a pointer to the Method*
 682         __ ldr(method, Address(rfp, 3*BytesPerWord));
 683         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
 684         oop_maps = new OopMapSet();
 685         oop_maps->add_gc_map(call_offset, map);
 686         restore_live_registers(sasm);
 687         __ leave();
 688         __ ret(lr);
 689       }
 690       break;
 691 
 692     case new_type_array_id:
 693     case new_object_array_id:
 694       {
 695         Register length   = r19; // Incoming
 696         Register klass    = r3; // Incoming
 697         Register obj      = r0; // Result
 698 
 699         if (id == new_type_array_id) {
 700           __ set_info("new_type_array", dont_gc_arguments);
 701         } else {
 702           __ set_info("new_object_array", dont_gc_arguments);
 703         }
 704 
 705 #ifdef ASSERT
 706         // assert object type is really an array of the proper kind
 707         {
 708           Label ok;
 709           Register t0 = obj;
 710           __ ldrw(t0, Address(klass, Klass::layout_helper_offset()));
 711           __ asrw(t0, t0, Klass::_lh_array_tag_shift);
 712           int tag = ((id == new_type_array_id)
 713                      ? Klass::_lh_array_tag_type_value
 714                      : Klass::_lh_array_tag_obj_value);
 715           __ mov(rscratch1, tag);
 716           __ cmpw(t0, rscratch1);
 717           __ br(Assembler::EQ, ok);
 718           __ stop("assert(is an array klass)");
 719           __ should_not_reach_here();
 720           __ bind(ok);
 721         }
 722 #endif // ASSERT
 723 
 724         __ enter();
 725         OopMap* map = save_live_registers(sasm);
 726         int call_offset;
 727         if (id == new_type_array_id) {
 728           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
 729         } else {
 730           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
 731         }
 732 
 733         oop_maps = new OopMapSet();
 734         oop_maps->add_gc_map(call_offset, map);
 735         restore_live_registers_except_r0(sasm);
 736 
 737         __ verify_oop(obj);
 738         __ leave();
 739         __ ret(lr);
 740 
 741         // r0: new array
 742       }
 743       break;
 744 
 745     case new_multi_array_id:
 746       { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
 747         // r0: klass
 748         // r19: rank
 749         // r2: address of 1st dimension
 750         OopMap* map = save_live_registers(sasm);
 751         __ mov(c_rarg1, r0);
 752         __ mov(c_rarg3, r2);
 753         __ mov(c_rarg2, r19);
 754         int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), r1, r2, r3);
 755 
 756         oop_maps = new OopMapSet();
 757         oop_maps->add_gc_map(call_offset, map);
 758         restore_live_registers_except_r0(sasm);
 759 
 760         // r0: new multi array
 761         __ verify_oop(r0);
 762       }
 763       break;
 764 
 765     case register_finalizer_id:
 766       {
 767         __ set_info("register_finalizer", dont_gc_arguments);
 768 
 769         // This is called via call_runtime, so the arguments
 770         // will be placed in C ABI locations
 771 
 772         __ verify_oop(c_rarg0);
 773 
 774         // load the klass and check the has-finalizer flag
 775         Label register_finalizer;
 776         Register t = r5;
 777         __ load_klass(t, r0);
 778         __ ldrw(t, Address(t, Klass::access_flags_offset()));
 779         __ tbnz(t, exact_log2(JVM_ACC_HAS_FINALIZER), register_finalizer);
 780         __ ret(lr);
 781 
 782         __ bind(register_finalizer);
 783         __ enter();
 784         OopMap* oop_map = save_live_registers(sasm);
 785         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), r0);
 786         oop_maps = new OopMapSet();
 787         oop_maps->add_gc_map(call_offset, oop_map);
 788 
 789         // Now restore all the live registers
 790         restore_live_registers(sasm);
 791 
 792         __ leave();
 793         __ ret(lr);
 794       }
 795       break;
 796 
 797     case throw_class_cast_exception_id:
 798       { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments, does_not_return);
 799         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
 800       }
 801       break;
 802 
 803     case throw_incompatible_class_change_error_id:
 804       { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments, does_not_return);
 805         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
 806       }
 807       break;
 808 
 809     case slow_subtype_check_id:
 810       {
 811         // Typical calling sequence:
 812         // __ push(klass_RInfo);  // object klass or other subclass
 813         // __ push(sup_k_RInfo);  // array element klass or other superclass
 814         // __ bl(slow_subtype_check);
 815         // Note that the subclass is pushed first, and is therefore deepest.
 816         enum layout {
 817           r0_off, r0_off_hi,
 818           r2_off, r2_off_hi,
 819           r4_off, r4_off_hi,
 820           r5_off, r5_off_hi,
 821           sup_k_off, sup_k_off_hi,
 822           klass_off, klass_off_hi,
 823           framesize,
 824           result_off = sup_k_off
 825         };
 826 
 827         __ set_info("slow_subtype_check", dont_gc_arguments);
 828         __ push(RegSet::of(r0, r2, r4, r5), sp);

 992         __ leave();
 993         DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
 994         assert(deopt_blob != nullptr, "deoptimization blob must have been created");
 995 
 996         __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
 997       }
 998       break;
 999 
1000     case dtrace_object_alloc_id:
1001       { // c_rarg0: object
1002         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
1003         save_live_registers(sasm);
1004 
1005         __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), c_rarg0);
1006 
1007         restore_live_registers(sasm);
1008       }
1009       break;
1010 
1011     default:
1012       { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments, does_not_return);
1013         __ mov(r0, (int)id);
1014         __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
1015       }
1016       break;
1017     }
1018   }
1019   return oop_maps;
1020 }
1021 
1022 #undef __
1023 
1024 const char *Runtime1::pd_name_for_address(address entry) { Unimplemented(); return 0; }

 624       {
 625         oop_maps = generate_handle_exception(id, sasm);
 626         __ leave();
 627         __ ret(lr);
 628       }
 629       break;
 630 
 631     case throw_div0_exception_id:
 632       { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments, does_not_return);
 633         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
 634       }
 635       break;
 636 
 637     case throw_null_pointer_exception_id:
 638       { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments, does_not_return);
 639         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
 640       }
 641       break;
 642 
 643     case new_instance_id:
 644     case new_instance_no_inline_id:
 645     case fast_new_instance_id:
 646     case fast_new_instance_init_check_id:
 647       {
 648         Register klass = r3; // Incoming
 649         Register obj   = r0; // Result
 650 
 651         if (id == new_instance_id) {
 652           __ set_info("new_instance", dont_gc_arguments);
 653         } else if (id == new_instance_no_inline_id) {
 654           __ set_info("new_instance_no_inline", dont_gc_arguments);
 655         } else if (id == fast_new_instance_id) {
 656           __ set_info("fast new_instance", dont_gc_arguments);
 657         } else {
 658           assert(id == fast_new_instance_init_check_id, "bad StubID");
 659           __ set_info("fast new_instance init check", dont_gc_arguments);
 660         }
 661 
 662         __ enter();
 663         OopMap* map = save_live_registers(sasm);
 664         int call_offset;
 665         if (id == new_instance_no_inline_id) {
 666           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance_no_inline), klass);
 667         } else {
 668           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
 669         }
 670         oop_maps = new OopMapSet();
 671         oop_maps->add_gc_map(call_offset, map);
 672         restore_live_registers_except_r0(sasm);
 673         __ verify_oop(obj);
 674         __ leave();
 675         __ ret(lr);
 676 
 677         // r0: new instance
 678       }
 679 
 680       break;
 681 
 682     case counter_overflow_id:
 683       {
 684         Register bci = r0, method = r1;
 685         __ enter();
 686         OopMap* map = save_live_registers(sasm);
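             // bci and the Method* are passed on the stack (stored by the compiled caller
             // with store_parameter), so after enter() they sit just above the saved
             // rfp/lr pair, at rfp + 2 and + 3 words.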
 687         // Retrieve bci
 688         __ ldrw(bci, Address(rfp, 2*BytesPerWord));
 689         // And a pointer to the Method*
 690         __ ldr(method, Address(rfp, 3*BytesPerWord));
 691         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
 692         oop_maps = new OopMapSet();
 693         oop_maps->add_gc_map(call_offset, map);
 694         restore_live_registers(sasm);
 695         __ leave();
 696         __ ret(lr);
 697       }
 698       break;
 699 
 700     case new_type_array_id:
 701     case new_object_array_id:
 702     case new_flat_array_id:
 703       {
 704         Register length   = r19; // Incoming
 705         Register klass    = r3; // Incoming
 706         Register obj      = r0; // Result
 707 
 708         if (id == new_type_array_id) {
 709           __ set_info("new_type_array", dont_gc_arguments);
 710         } else if (id == new_object_array_id) {
 711           __ set_info("new_object_array", dont_gc_arguments);
 712         } else {
 713           __ set_info("new_flat_array", dont_gc_arguments);
 714         }
 715 
 716 #ifdef ASSERT
 717         // assert object type is really an array of the proper kind
 718         {
 719           Label ok;
 720           Register t0 = obj;
 721           __ ldrw(t0, Address(klass, Klass::layout_helper_offset()));
 722           __ asrw(t0, t0, Klass::_lh_array_tag_shift);
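               // The array tag is kept in the topmost bits of the layout helper; the
               // arithmetic shift brings it down so it can be compared against the
               // expected tag values below.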
 723           switch (id) {
 724           case new_type_array_id:
 725             __ cmpw(t0, Klass::_lh_array_tag_type_value);
 726             __ br(Assembler::EQ, ok);
 727             __ stop("assert(is a type array klass)");
 728             break;
 729           case new_object_array_id:
 730             __ cmpw(t0, Klass::_lh_array_tag_obj_value); // new "[Ljava/lang/Object;"
 731             __ br(Assembler::EQ, ok);
 732             __ cmpw(t0, Klass::_lh_array_tag_vt_value);  // new "[LVT;"
 733             __ br(Assembler::EQ, ok);
 734             __ stop("assert(is an object or inline type array klass)");
 735             break;
 736           case new_flat_array_id:
 737             // new "[QVT;"
 738             __ cmpw(t0, Klass::_lh_array_tag_vt_value);  // the array can be flattened.
 739             __ br(Assembler::EQ, ok);
 740             __ cmpw(t0, Klass::_lh_array_tag_obj_value); // the array cannot be flattened (due to InlineArrayElementMaxFlatSize, etc)
 741             __ br(Assembler::EQ, ok);
 742             __ stop("assert(is an object or inline type array klass)");
 743             break;
 744           default:  ShouldNotReachHere();
 745           }
 746           __ should_not_reach_here();
 747           __ bind(ok);
 748         }
 749 #endif // ASSERT
 750 
 751         __ enter();
 752         OopMap* map = save_live_registers(sasm);
 753         int call_offset;
 754         if (id == new_type_array_id) {
 755           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
 756         } else if (id == new_object_array_id) {
 757           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
 758         } else {
 759           assert(id == new_flat_array_id, "must be");
 760           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_flat_array), klass, length);
 761         }
 762 
 763         oop_maps = new OopMapSet();
 764         oop_maps->add_gc_map(call_offset, map);
 765         restore_live_registers_except_r0(sasm);
 766 
 767         __ verify_oop(obj);
 768         __ leave();
 769         __ ret(lr);
 770 
 771         // r0: new array
 772       }
 773       break;
 774 
 775     case new_multi_array_id:
 776       { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
 777         // r0: klass
 778         // r19: rank
 779         // r2: address of 1st dimension
 780         OopMap* map = save_live_registers(sasm);
 781         __ mov(c_rarg1, r0);
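             // Copy r2 into c_rarg3 (r3) before c_rarg2 (which is r2 on AArch64) is
             // overwritten with the rank below.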
 782         __ mov(c_rarg3, r2);
 783         __ mov(c_rarg2, r19);
 784         int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), r1, r2, r3);
 785 
 786         oop_maps = new OopMapSet();
 787         oop_maps->add_gc_map(call_offset, map);
 788         restore_live_registers_except_r0(sasm);
 789 
 790         // r0: new multi array
 791         __ verify_oop(r0);
 792       }
 793       break;
 794 
 795     case buffer_inline_args_id:
 796     case buffer_inline_args_no_receiver_id:
 797       {
 798         const char* name = (id == buffer_inline_args_id) ?
 799           "buffer_inline_args" : "buffer_inline_args_no_receiver";
 800         StubFrame f(sasm, name, dont_gc_arguments);
 801         OopMap* map = save_live_registers(sasm);
 802         Register method = r19;   // Incoming
 803         address entry = (id == buffer_inline_args_id) ?
 804           CAST_FROM_FN_PTR(address, buffer_inline_args) :
 805           CAST_FROM_FN_PTR(address, buffer_inline_args_no_receiver);
 806         // This is called from a C1 method's scalarized entry point
 807         // where r0-r7 may be holding live argument values, so we can't
 808         // return the result in r0 as the other stubs do. LR is used as
 809         // a temporary below to avoid the result being clobbered by
 810         // restore_live_registers.
 811         int call_offset = __ call_RT(lr, noreg, entry, method);
 812         oop_maps = new OopMapSet();
 813         oop_maps->add_gc_map(call_offset, map);
 814         restore_live_registers(sasm);
 815         __ mov(r20, lr);
 816         __ verify_oop(r20);  // r20: an array of buffered value objects
 817       }
 818       break;
 819 
 820     case load_flattened_array_id:
 821       {
 822         StubFrame f(sasm, "load_flattened_array", dont_gc_arguments);
 823         OopMap* map = save_live_registers(sasm);
 824 
 825         // Called with store_parameter, not the C ABI
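             // (the arguments were pushed by the caller with store_parameter; the
             // f.load_argument() calls below read them back out of the caller's frame)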
 826 
 827         f.load_argument(1, r0); // r0: array
 828         f.load_argument(0, r1); // r1: index
 829         int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, load_flattened_array), r0, r1);
 830 
 831         oop_maps = new OopMapSet();
 832         oop_maps->add_gc_map(call_offset, map);
 833         restore_live_registers_except_r0(sasm);
 834 
 835         // r0: loaded element at array[index]
 836         __ verify_oop(r0);
 837       }
 838       break;
 839 
 840     case store_flattened_array_id:
 841       {
 842         StubFrame f(sasm, "store_flattened_array", dont_gc_arguments);
 843         OopMap* map = save_live_registers(sasm, 4);
 844 
 845         // Called with store_parameter, not the C ABI
 846 
 847         f.load_argument(2, r0); // r0: array
 848         f.load_argument(1, r1); // r1: index
 849         f.load_argument(0, r2); // r2: value
 850         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, store_flattened_array), r0, r1, r2);
 851 
 852         oop_maps = new OopMapSet();
 853         oop_maps->add_gc_map(call_offset, map);
 854         restore_live_registers_except_r0(sasm);
 855       }
 856       break;
 857 
 858     case substitutability_check_id:
 859       {
 860         StubFrame f(sasm, "substitutability_check", dont_gc_arguments);
 861         OopMap* map = save_live_registers(sasm);
 862 
 863         // Called with store_parameter, not the C ABI
 864 
 865         f.load_argument(1, r1); // r1: left
 866         f.load_argument(0, r2); // r2: right
 867         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, substitutability_check), r1, r2);
 868 
 869         oop_maps = new OopMapSet();
 870         oop_maps->add_gc_map(call_offset, map);
 871         restore_live_registers_except_r0(sasm);
 872 
 873         // r0: whether the two operands are substitutable
 874       }
 875       break;
 876 
 877     case register_finalizer_id:
 878       {
 879         __ set_info("register_finalizer", dont_gc_arguments);
 880 
 881         // This is called via call_runtime, so the arguments
 882         // will be placed in C ABI locations
 883 
 884         __ verify_oop(c_rarg0);
 885 
 886         // load the klass and check the has-finalizer flag
 887         Label register_finalizer;
 888         Register t = r5;
 889         __ load_klass(t, r0);
 890         __ ldrw(t, Address(t, Klass::access_flags_offset()));
 891         __ tbnz(t, exact_log2(JVM_ACC_HAS_FINALIZER), register_finalizer);
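             // klass has no finalizer: nothing to register, return to the caller immediately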
 892         __ ret(lr);
 893 
 894         __ bind(register_finalizer);
 895         __ enter();
 896         OopMap* oop_map = save_live_registers(sasm);
 897         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), r0);
 898         oop_maps = new OopMapSet();
 899         oop_maps->add_gc_map(call_offset, oop_map);
 900 
 901         // Now restore all the live registers
 902         restore_live_registers(sasm);
 903 
 904         __ leave();
 905         __ ret(lr);
 906       }
 907       break;
 908 
 909     case throw_class_cast_exception_id:
 910       { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments, does_not_return);
 911         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
 912       }
 913       break;
 914 
 915     case throw_incompatible_class_change_error_id:
 916       { StubFrame f(sasm, "throw_incompatible_class_change_error", dont_gc_arguments, does_not_return);
 917         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
 918       }
 919       break;
 920 
 921     case throw_illegal_monitor_state_exception_id:
 922       { StubFrame f(sasm, "throw_illegal_monitor_state_exception", dont_gc_arguments);
 923         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_illegal_monitor_state_exception), false);
 924       }
 925       break;
 926 
 927     case slow_subtype_check_id:
 928       {
 929         // Typical calling sequence:
 930         // __ push(klass_RInfo);  // object klass or other subclass
 931         // __ push(sup_k_RInfo);  // array element klass or other superclass
 932         // __ bl(slow_subtype_check);
 933         // Note that the subclass is pushed first, and is therefore deepest.
 934         enum layout {
 935           r0_off, r0_off_hi,
 936           r2_off, r2_off_hi,
 937           r4_off, r4_off_hi,
 938           r5_off, r5_off_hi,
 939           sup_k_off, sup_k_off_hi,
 940           klass_off, klass_off_hi,
 941           framesize,
 942           result_off = sup_k_off
 943         };
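             // Offsets are 32-bit VMReg slots, so each saved 64-bit register takes an
             // _off/_off_hi pair; the result is returned in the slot the superclass
             // was pushed into (result_off aliases sup_k_off).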
 944 
 945         __ set_info("slow_subtype_check", dont_gc_arguments);
 946         __ push(RegSet::of(r0, r2, r4, r5), sp);

1110         __ leave();
1111         DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
1112         assert(deopt_blob != nullptr, "deoptimization blob must have been created");
1113 
1114         __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
1115       }
1116       break;
1117 
1118     case dtrace_object_alloc_id:
1119       { // c_rarg0: object
1120         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
1121         save_live_registers(sasm);
1122 
1123         __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), c_rarg0);
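             // call_VM_leaf cannot safepoint, so no oop map is registered here; the live
             // registers are saved and restored only to protect them across the C call.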
1124 
1125         restore_live_registers(sasm);
1126       }
1127       break;
1128 
1129     default:
1130       // FIXME: For an unhandled trap_id this code fails with an assert during VM initialization
1131       // rather than inserting a call to unimplemented_entry
1132       { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments, does_not_return);
1133         __ mov(r0, (int)id);
1134         __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
1135       }
1136       break;
1137     }
1138   }
1139 
1140 
1141   return oop_maps;
1142 }
1143 
1144 #undef __
1145 
1146 const char *Runtime1::pd_name_for_address(address entry) { Unimplemented(); return 0; }