< prev index next >

src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp

Print this page

 618       {
 619         oop_maps = generate_handle_exception(id, sasm);
 620         __ leave();
 621         __ ret(lr);
 622       }
 623       break;
 624 
 625     case throw_div0_exception_id:
 626       { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments, does_not_return);
 627         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
 628       }
 629       break;
 630 
 631     case throw_null_pointer_exception_id:
 632       { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments, does_not_return);
 633         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
 634       }
 635       break;
 636 
 637     case new_instance_id:

 638     case fast_new_instance_id:
 639     case fast_new_instance_init_check_id:
 640       {
 641         Register klass = r3; // Incoming
 642         Register obj   = r0; // Result
 643 
 644         if (id == new_instance_id) {
 645           __ set_info("new_instance", dont_gc_arguments);


 646         } else if (id == fast_new_instance_id) {
 647           __ set_info("fast new_instance", dont_gc_arguments);
 648         } else {
 649           assert(id == fast_new_instance_init_check_id, "bad StubID");
 650           __ set_info("fast new_instance init check", dont_gc_arguments);
 651         }
 652 
 653         // If TLAB is disabled, see if there is support for inlining contiguous
 654         // allocations.
 655         // Otherwise, just go to the slow path.
 656         if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
 657             !UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
 658           Label slow_path;
 659           Register obj_size = r19;
 660           Register t1       = r10;
 661           Register t2       = r11;
 662           assert_different_registers(klass, obj, obj_size, t1, t2);
 663 
 664           __ stp(r19, zr, Address(__ pre(sp, -2 * wordSize)));
 665 

 685             __ bind(ok);
 686           }
 687 #endif // ASSERT
 688 
 689           // get the instance size (size is positive so movl is fine for 64bit)
 690           __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));
 691 
 692           __ eden_allocate(obj, obj_size, 0, t1, slow_path);
 693 
 694           __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
 695           __ verify_oop(obj);
 696           __ ldp(r19, zr, Address(__ post(sp, 2 * wordSize)));
 697           __ ret(lr);
 698 
 699           __ bind(slow_path);
 700           __ ldp(r19, zr, Address(__ post(sp, 2 * wordSize)));
 701         }
 702 
 703         __ enter();
 704         OopMap* map = save_live_registers(sasm);
 705         int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);





 706         oop_maps = new OopMapSet();
 707         oop_maps->add_gc_map(call_offset, map);
 708         restore_live_registers_except_r0(sasm);
 709         __ verify_oop(obj);
 710         __ leave();
 711         __ ret(lr);
 712 
 713         // r0,: new instance
 714       }
 715 
 716       break;
 717 
 718     case counter_overflow_id:
 719       {
 720         Register bci = r0, method = r1;
 721         __ enter();
 722         OopMap* map = save_live_registers(sasm);
 723         // Retrieve bci
 724         __ ldrw(bci, Address(rfp, 2*BytesPerWord));
 725         // And a pointer to the Method*
 726         __ ldr(method, Address(rfp, 3*BytesPerWord));
 727         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
 728         oop_maps = new OopMapSet();
 729         oop_maps->add_gc_map(call_offset, map);
 730         restore_live_registers(sasm);
 731         __ leave();
 732         __ ret(lr);
 733       }
 734       break;
 735 
 736     case new_type_array_id:
 737     case new_object_array_id:

 738       {
 739         Register length   = r19; // Incoming
 740         Register klass    = r3; // Incoming
 741         Register obj      = r0; // Result
 742 
 743         if (id == new_type_array_id) {
 744           __ set_info("new_type_array", dont_gc_arguments);
 745         } else {
 746           __ set_info("new_object_array", dont_gc_arguments);


 747         }
 748 
 749 #ifdef ASSERT
 750         // assert object type is really an array of the proper kind
 751         {
 752           Label ok;
 753           Register t0 = obj;
 754           __ ldrw(t0, Address(klass, Klass::layout_helper_offset()));
 755           __ asrw(t0, t0, Klass::_lh_array_tag_shift);
 756           int tag = ((id == new_type_array_id)
 757                      ? Klass::_lh_array_tag_type_value
 758                      : Klass::_lh_array_tag_obj_value);
 759           __ mov(rscratch1, tag);
 760           __ cmpw(t0, rscratch1);
 761           __ br(Assembler::EQ, ok);
 762           __ stop("assert(is an array klass)");
















 763           __ should_not_reach_here();
 764           __ bind(ok);
 765         }
 766 #endif // ASSERT
 767 
 768         // If TLAB is disabled, see if there is support for inlining contiguous
 769         // allocations.
 770         // Otherwise, just go to the slow path.
 771         if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
 772           Register arr_size = r5;
 773           Register t1       = r10;
 774           Register t2       = r11;
 775           Label slow_path;
 776           assert_different_registers(length, klass, obj, arr_size, t1, t2);
 777 
 778           // check that array length is small enough for fast path.
 779           __ mov(rscratch1, C1_MacroAssembler::max_array_allocation_length);
 780           __ cmpw(length, rscratch1);
 781           __ br(Assembler::HI, slow_path);
 782 

 798           __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
 799           assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
 800           assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
 801           __ andr(t1, t1, Klass::_lh_header_size_mask);
 802           __ sub(arr_size, arr_size, t1);  // body length
 803           __ add(t1, t1, obj);       // body start
 804           __ initialize_body(t1, arr_size, 0, t1, t2);
 805           __ membar(Assembler::StoreStore);
 806           __ verify_oop(obj);
 807 
 808           __ ret(lr);
 809 
 810           __ bind(slow_path);
 811         }
 812 
 813         __ enter();
 814         OopMap* map = save_live_registers(sasm);
 815         int call_offset;
 816         if (id == new_type_array_id) {
 817           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
 818         } else {
 819           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);



 820         }
 821 
 822         oop_maps = new OopMapSet();
 823         oop_maps->add_gc_map(call_offset, map);
 824         restore_live_registers_except_r0(sasm);
 825 
 826         __ verify_oop(obj);
 827         __ leave();
 828         __ ret(lr);
 829 
 830         // r0: new array
 831       }
 832       break;
 833 
 834     case new_multi_array_id:
 835       { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
 836         // r0,: klass
 837         // r19,: rank
 838         // r2: address of 1st dimension
 839         OopMap* map = save_live_registers(sasm);
 840         __ mov(c_rarg1, r0);
 841         __ mov(c_rarg3, r2);
 842         __ mov(c_rarg2, r19);
 843         int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), r1, r2, r3);
 844 
 845         oop_maps = new OopMapSet();
 846         oop_maps->add_gc_map(call_offset, map);
 847         restore_live_registers_except_r0(sasm);
 848 
 849         // r0,: new multi array
 850         __ verify_oop(r0);
 851       }
 852       break;
 853 


















































































 854     case register_finalizer_id:
 855       {
 856         __ set_info("register_finalizer", dont_gc_arguments);
 857 
 858         // This is called via call_runtime so the arguments
 859         // will be placed in C ABI locations
 860 
 861         __ verify_oop(c_rarg0);
 862 
 863         // load the klass and check the has finalizer flag
 864         Label register_finalizer;
 865         Register t = r5;
 866         __ load_klass(t, r0);
 867         __ ldrw(t, Address(t, Klass::access_flags_offset()));
 868         __ tbnz(t, exact_log2(JVM_ACC_HAS_FINALIZER), register_finalizer);
 869         __ ret(lr);
 870 
 871         __ bind(register_finalizer);
 872         __ enter();
 873         OopMap* oop_map = save_live_registers(sasm);
 874         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), r0);
 875         oop_maps = new OopMapSet();
 876         oop_maps->add_gc_map(call_offset, oop_map);
 877 
 878         // Now restore all the live registers
 879         restore_live_registers(sasm);
 880 
 881         __ leave();
 882         __ ret(lr);
 883       }
 884       break;
 885 
 886     case throw_class_cast_exception_id:
 887       { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments, does_not_return);
 888         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
 889       }
 890       break;
 891 
 892     case throw_incompatible_class_change_error_id:
 893       { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments, does_not_return);
 894         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
 895       }
 896       break;
 897 






 898     case slow_subtype_check_id:
 899       {
 900         // Typical calling sequence:
 901         // __ push(klass_RInfo);  // object klass or other subclass
 902         // __ push(sup_k_RInfo);  // array element klass or other superclass
 903         // __ bl(slow_subtype_check);
 904         // Note that the subclass is pushed first, and is therefore deepest.
 905         enum layout {
 906           r0_off, r0_off_hi,
 907           r2_off, r2_off_hi,
 908           r4_off, r4_off_hi,
 909           r5_off, r5_off_hi,
 910           sup_k_off, sup_k_off_hi,
 911           klass_off, klass_off_hi,
 912           framesize,
 913           result_off = sup_k_off
 914         };
 915 
 916         __ set_info("slow_subtype_check", dont_gc_arguments);
 917         __ push(RegSet::of(r0, r2, r4, r5), sp);

1081         __ leave();
1082         DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
1083         assert(deopt_blob != NULL, "deoptimization blob must have been created");
1084 
1085         __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
1086       }
1087       break;
1088 
1089     case dtrace_object_alloc_id:
1090       { // c_rarg0: object
1091         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
1092         save_live_registers(sasm);
1093 
1094         __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), c_rarg0);
1095 
1096         restore_live_registers(sasm);
1097       }
1098       break;
1099 
1100     default:


1101       { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments, does_not_return);
1102         __ mov(r0, (int)id);
1103         __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
1104       }
1105       break;
1106     }
1107   }


1108   return oop_maps;
1109 }
1110 
1111 #undef __
1112 
 1113 const char *Runtime1::pd_name_for_address(address entry) { Unimplemented(); return 0; } // no platform-specific stub names on AArch64; calls Unimplemented(), so the return is nominally dead code

 618       {
 619         oop_maps = generate_handle_exception(id, sasm);
 620         __ leave();
 621         __ ret(lr);
 622       }
 623       break;
 624 
 625     case throw_div0_exception_id:
 626       { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments, does_not_return);
 627         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
 628       }
 629       break;
 630 
 631     case throw_null_pointer_exception_id:
 632       { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments, does_not_return);
 633         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
 634       }
 635       break;
 636 
 637     case new_instance_id:
 638     case new_instance_no_inline_id:
 639     case fast_new_instance_id:
 640     case fast_new_instance_init_check_id:
 641       {
 642         Register klass = r3; // Incoming
 643         Register obj   = r0; // Result
 644 
 645         if (id == new_instance_id) {
 646           __ set_info("new_instance", dont_gc_arguments);
 647         } else if (id == new_instance_no_inline_id) {
 648           __ set_info("new_instance_no_inline", dont_gc_arguments);
 649         } else if (id == fast_new_instance_id) {
 650           __ set_info("fast new_instance", dont_gc_arguments);
 651         } else {
 652           assert(id == fast_new_instance_init_check_id, "bad StubID");
 653           __ set_info("fast new_instance init check", dont_gc_arguments);
 654         }
 655 
 656         // If TLAB is disabled, see if there is support for inlining contiguous
 657         // allocations.
 658         // Otherwise, just go to the slow path.
 659         if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
 660             !UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
 661           Label slow_path;
 662           Register obj_size = r19;
 663           Register t1       = r10;
 664           Register t2       = r11;
 665           assert_different_registers(klass, obj, obj_size, t1, t2);
 666 
 667           __ stp(r19, zr, Address(__ pre(sp, -2 * wordSize)));
 668 

 688             __ bind(ok);
 689           }
 690 #endif // ASSERT
 691 
 692           // get the instance size (size is positive so movl is fine for 64bit)
 693           __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));
 694 
 695           __ eden_allocate(obj, obj_size, 0, t1, slow_path);
 696 
 697           __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
 698           __ verify_oop(obj);
 699           __ ldp(r19, zr, Address(__ post(sp, 2 * wordSize)));
 700           __ ret(lr);
 701 
 702           __ bind(slow_path);
 703           __ ldp(r19, zr, Address(__ post(sp, 2 * wordSize)));
 704         }
 705 
 706         __ enter();
 707         OopMap* map = save_live_registers(sasm);
 708         int call_offset;
 709         if (id == new_instance_no_inline_id) {
 710           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance_no_inline), klass);
 711         } else {
 712           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
 713         }
 714         oop_maps = new OopMapSet();
 715         oop_maps->add_gc_map(call_offset, map);
 716         restore_live_registers_except_r0(sasm);
 717         __ verify_oop(obj);
 718         __ leave();
 719         __ ret(lr);
 720 
 721         // r0,: new instance
 722       }
 723 
 724       break;
 725 
 726     case counter_overflow_id:
 727       {
 728         Register bci = r0, method = r1;
 729         __ enter();
 730         OopMap* map = save_live_registers(sasm);
 731         // Retrieve bci
 732         __ ldrw(bci, Address(rfp, 2*BytesPerWord));
 733         // And a pointer to the Method*
 734         __ ldr(method, Address(rfp, 3*BytesPerWord));
 735         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
 736         oop_maps = new OopMapSet();
 737         oop_maps->add_gc_map(call_offset, map);
 738         restore_live_registers(sasm);
 739         __ leave();
 740         __ ret(lr);
 741       }
 742       break;
 743 
 744     case new_type_array_id:
 745     case new_object_array_id:
 746     case new_flat_array_id:
 747       {
 748         Register length   = r19; // Incoming
 749         Register klass    = r3; // Incoming
 750         Register obj      = r0; // Result
 751 
 752         if (id == new_type_array_id) {
 753           __ set_info("new_type_array", dont_gc_arguments);
 754         } else if (id == new_object_array_id) {
 755           __ set_info("new_object_array", dont_gc_arguments);
 756         } else {
 757           __ set_info("new_flat_array", dont_gc_arguments);
 758         }
 759 
 760 #ifdef ASSERT
 761         // assert object type is really an array of the proper kind
 762         {
 763           Label ok;
 764           Register t0 = obj;
 765           __ ldrw(t0, Address(klass, Klass::layout_helper_offset()));
 766           __ asrw(t0, t0, Klass::_lh_array_tag_shift);
 767           switch (id) {
 768           case new_type_array_id:
 769             __ cmpw(t0, Klass::_lh_array_tag_type_value);
 770             __ br(Assembler::EQ, ok);
 771             __ stop("assert(is a type array klass)");
 772             break;
 773           case new_object_array_id:
 774             __ cmpw(t0, Klass::_lh_array_tag_obj_value); // new "[Ljava/lang/Object;"
 775             __ br(Assembler::EQ, ok);
 776             __ cmpw(t0, Klass::_lh_array_tag_vt_value);  // new "[LVT;"
 777             __ br(Assembler::EQ, ok);
 778             __ stop("assert(is an object or inline type array klass)");
 779             break;
 780           case new_flat_array_id:
 781             // new "[QVT;"
 782             __ cmpw(t0, Klass::_lh_array_tag_vt_value);  // the array can be flattened.
 783             __ br(Assembler::EQ, ok);
 784             __ cmpw(t0, Klass::_lh_array_tag_obj_value); // the array cannot be flattened (due to InlineArrayElementMaxFlatSize, etc)
 785             __ br(Assembler::EQ, ok);
 786             __ stop("assert(is an object or inline type array klass)");
 787             break;
 788           default:  ShouldNotReachHere();
 789           }
 790           __ should_not_reach_here();
 791           __ bind(ok);
 792         }
 793 #endif // ASSERT
 794 
 795         // If TLAB is disabled, see if there is support for inlining contiguous
 796         // allocations.
 797         // Otherwise, just go to the slow path.
 798         if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
 799           Register arr_size = r5;
 800           Register t1       = r10;
 801           Register t2       = r11;
 802           Label slow_path;
 803           assert_different_registers(length, klass, obj, arr_size, t1, t2);
 804 
 805           // check that array length is small enough for fast path.
 806           __ mov(rscratch1, C1_MacroAssembler::max_array_allocation_length);
 807           __ cmpw(length, rscratch1);
 808           __ br(Assembler::HI, slow_path);
 809 

 825           __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
 826           assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
 827           assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
 828           __ andr(t1, t1, Klass::_lh_header_size_mask);
 829           __ sub(arr_size, arr_size, t1);  // body length
 830           __ add(t1, t1, obj);       // body start
 831           __ initialize_body(t1, arr_size, 0, t1, t2);
 832           __ membar(Assembler::StoreStore);
 833           __ verify_oop(obj);
 834 
 835           __ ret(lr);
 836 
 837           __ bind(slow_path);
 838         }
 839 
 840         __ enter();
 841         OopMap* map = save_live_registers(sasm);
 842         int call_offset;
 843         if (id == new_type_array_id) {
 844           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
 845         } else if (id == new_object_array_id) {
 846           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
 847         } else {
 848           assert(id == new_flat_array_id, "must be");
 849           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_flat_array), klass, length);
 850         }
 851 
 852         oop_maps = new OopMapSet();
 853         oop_maps->add_gc_map(call_offset, map);
 854         restore_live_registers_except_r0(sasm);
 855 
 856         __ verify_oop(obj);
 857         __ leave();
 858         __ ret(lr);
 859 
 860         // r0: new array
 861       }
 862       break;
 863 
 864     case new_multi_array_id:
 865       { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
 866         // r0,: klass
 867         // r19,: rank
 868         // r2: address of 1st dimension
 869         OopMap* map = save_live_registers(sasm);
 870         __ mov(c_rarg1, r0);
 871         __ mov(c_rarg3, r2);
 872         __ mov(c_rarg2, r19);
 873         int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), r1, r2, r3);
 874 
 875         oop_maps = new OopMapSet();
 876         oop_maps->add_gc_map(call_offset, map);
 877         restore_live_registers_except_r0(sasm);
 878 
 879         // r0,: new multi array
 880         __ verify_oop(r0);
 881       }
 882       break;
 883 
 884     case buffer_inline_args_id:
 885     case buffer_inline_args_no_receiver_id:
 886       {
 887         const char* name = (id == buffer_inline_args_id) ?
 888           "buffer_inline_args" : "buffer_inline_args_no_receiver";
 889         StubFrame f(sasm, name, dont_gc_arguments);
 890         OopMap* map = save_live_registers(sasm);
 891         Register method = r19;   // Incoming
 892         address entry = (id == buffer_inline_args_id) ?
 893           CAST_FROM_FN_PTR(address, buffer_inline_args) :
 894           CAST_FROM_FN_PTR(address, buffer_inline_args_no_receiver);
 895         // This is called from a C1 method's scalarized entry point
 896         // where r0-r7 may be holding live argument values so we can't
 897         // return the result in r0 as the other stubs do. LR is used as
 898         // a temporary below to avoid the result being clobbered by
 899         // restore_live_registers.
 900         int call_offset = __ call_RT(lr, noreg, entry, method);
 901         oop_maps = new OopMapSet();
 902         oop_maps->add_gc_map(call_offset, map);
 903         restore_live_registers(sasm);
 904         __ mov(r20, lr);
 905         __ verify_oop(r20);  // r20: an array of buffered value objects
 906      }
 907      break;
 908 
 909     case load_flattened_array_id:
 910       {
 911         StubFrame f(sasm, "load_flattened_array", dont_gc_arguments);
 912         OopMap* map = save_live_registers(sasm);
 913 
 914         // Called with store_parameter and not C abi
 915 
 916         f.load_argument(1, r0); // r0,: array
 917         f.load_argument(0, r1); // r1,: index
 918         int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, load_flattened_array), r0, r1);
 919 
 920         oop_maps = new OopMapSet();
 921         oop_maps->add_gc_map(call_offset, map);
 922         restore_live_registers_except_r0(sasm);
 923 
 924         // r0: loaded element at array[index]
 925         __ verify_oop(r0);
 926       }
 927       break;
 928 
 929     case store_flattened_array_id:
 930       {
 931         StubFrame f(sasm, "store_flattened_array", dont_gc_arguments);
 932         OopMap* map = save_live_registers(sasm, 4);
 933 
 934         // Called with store_parameter and not C abi
 935 
 936         f.load_argument(2, r0); // r0: array
 937         f.load_argument(1, r1); // r1: index
 938         f.load_argument(0, r2); // r2: value
 939         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, store_flattened_array), r0, r1, r2);
 940 
 941         oop_maps = new OopMapSet();
 942         oop_maps->add_gc_map(call_offset, map);
 943         restore_live_registers_except_r0(sasm);
 944       }
 945       break;
 946 
 947     case substitutability_check_id:
 948       {
 949         StubFrame f(sasm, "substitutability_check", dont_gc_arguments);
 950         OopMap* map = save_live_registers(sasm);
 951 
 952         // Called with store_parameter and not C abi
 953 
 954         f.load_argument(1, r1); // r1,: left
 955         f.load_argument(0, r2); // r2,: right
 956         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, substitutability_check), r1, r2);
 957 
 958         oop_maps = new OopMapSet();
 959         oop_maps->add_gc_map(call_offset, map);
 960         restore_live_registers_except_r0(sasm);
 961 
 962         // r0,: are the two operands substitutable
 963       }
 964       break;
 965 
 966     case register_finalizer_id:
 967       {
 968         __ set_info("register_finalizer", dont_gc_arguments);
 969 
 970         // This is called via call_runtime so the arguments
 971         // will be placed in C ABI locations
 972 
 973         __ verify_oop(c_rarg0);
 974 
 975         // load the klass and check the has finalizer flag
 976         Label register_finalizer;
 977         Register t = r5;
 978         __ load_klass(t, r0);
 979         __ ldrw(t, Address(t, Klass::access_flags_offset()));
 980         __ tbnz(t, exact_log2(JVM_ACC_HAS_FINALIZER), register_finalizer);
 981         __ ret(lr);
 982 
 983         __ bind(register_finalizer);
 984         __ enter();
 985         OopMap* oop_map = save_live_registers(sasm);
 986         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), r0);
 987         oop_maps = new OopMapSet();
 988         oop_maps->add_gc_map(call_offset, oop_map);
 989 
 990         // Now restore all the live registers
 991         restore_live_registers(sasm);
 992 
 993         __ leave();
 994         __ ret(lr);
 995       }
 996       break;
 997 
 998     case throw_class_cast_exception_id:
 999       { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments, does_not_return);
1000         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
1001       }
1002       break;
1003 
1004     case throw_incompatible_class_change_error_id:
1005       { StubFrame f(sasm, "throw_incompatible_class_change_error", dont_gc_arguments, does_not_return);
1006         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
1007       }
1008       break;
1009 
1010     case throw_illegal_monitor_state_exception_id:
1011       { StubFrame f(sasm, "throw_illegal_monitor_state_exception", dont_gc_arguments);
1012         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_illegal_monitor_state_exception), false);
1013       }
1014       break;
1015 
1016     case slow_subtype_check_id:
1017       {
1018         // Typical calling sequence:
1019         // __ push(klass_RInfo);  // object klass or other subclass
1020         // __ push(sup_k_RInfo);  // array element klass or other superclass
1021         // __ bl(slow_subtype_check);
1022         // Note that the subclass is pushed first, and is therefore deepest.
1023         enum layout {
1024           r0_off, r0_off_hi,
1025           r2_off, r2_off_hi,
1026           r4_off, r4_off_hi,
1027           r5_off, r5_off_hi,
1028           sup_k_off, sup_k_off_hi,
1029           klass_off, klass_off_hi,
1030           framesize,
1031           result_off = sup_k_off
1032         };
1033 
1034         __ set_info("slow_subtype_check", dont_gc_arguments);
1035         __ push(RegSet::of(r0, r2, r4, r5), sp);

1199         __ leave();
1200         DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
1201         assert(deopt_blob != NULL, "deoptimization blob must have been created");
1202 
1203         __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
1204       }
1205       break;
1206 
1207     case dtrace_object_alloc_id:
1208       { // c_rarg0: object
1209         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
1210         save_live_registers(sasm);
1211 
1212         __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), c_rarg0);
1213 
1214         restore_live_registers(sasm);
1215       }
1216       break;
1217 
1218     default:
 1219       // FIXME: For unhandled trap_id this code fails with assert during VM initialization
1220       // rather than insert a call to unimplemented_entry
1221       { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments, does_not_return);
1222         __ mov(r0, (int)id);
1223         __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
1224       }
1225       break;
1226     }
1227   }
1228 
1229 
1230   return oop_maps;
1231 }
1232 
1233 #undef __
1234 
 1235 const char *Runtime1::pd_name_for_address(address entry) { Unimplemented(); return 0; } // no platform-specific stub names on AArch64; calls Unimplemented(), so the return is nominally dead code
< prev index next >