
src/hotspot/cpu/arm/sharedRuntime_arm.cpp

 735       receiver_reg = r->as_Register();
 736     }
 737   }
 738 
 739   // Figure out which address we are really jumping to:
 740   MethodHandles::generate_method_handle_dispatch(masm, iid,
 741                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
 742 }
 743 
 744 // ---------------------------------------------------------------------------
 745 // Generate a native wrapper for a given method.  The method takes arguments
 746 // in the Java compiled code convention, marshals them to the native
 747 // convention (handlizes oops, etc), transitions to native, makes the call,
 748 // returns to java state (possibly blocking), unhandlizes any result and
 749 // returns.
 750 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 751                                                 const methodHandle& method,
 752                                                 int compile_id,
 753                                                 BasicType* in_sig_bt,
 754                                                 VMRegPair* in_regs,
 755                                                 BasicType ret_type,
 756                                                 address critical_entry) {
 757   if (method->is_method_handle_intrinsic()) {
 758     vmIntrinsics::ID iid = method->intrinsic_id();
 759     intptr_t start = (intptr_t)__ pc();
 760     int vep_offset = ((intptr_t)__ pc()) - start;
 761     gen_special_dispatch(masm,
 762                          method,
 763                          in_sig_bt,
 764                          in_regs);
 765     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
 766     __ flush();
 767     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
 768     return nmethod::new_native_nmethod(method,
 769                                        compile_id,
 770                                        masm->code(),
 771                                        vep_offset,
 772                                        frame_complete,
 773                                        stack_slots / VMRegImpl::slots_per_word,
 774                                        in_ByteSize(-1),
 775                                        in_ByteSize(-1),
 776                                        (OopMapSet*)NULL);


 844   int vep_offset = __ pc() - start;
 845 
 846 
 847   if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
 848     // Object.hashCode and System.identityHashCode can pull the hashCode from the header
 849     // word, once it has been computed, instead of doing a full VM transition.
 850     Label slow_case;
 851     const Register obj_reg = R0;
 852 
 853     // Unlike Object.hashCode, System.identityHashCode is a static method and gets the
 854     // object as an argument instead of as the receiver.
 855     if (method->intrinsic_id() == vmIntrinsics::_identityHashCode) {
 856       assert(method->is_static(), "method should be static");
 857       // return 0 for null reference input, return val = R0 = obj_reg = 0
 858       __ cmp(obj_reg, 0);
 859       __ bx(LR, eq);
 860     }
 861 
 862     __ ldr(Rtemp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 863 
 864     assert(markWord::unlocked_value == 1, "adjust this code");
 865     __ tbz(Rtemp, exact_log2(markWord::unlocked_value), slow_case);
 866 
 867     if (UseBiasedLocking) {
 868       assert(is_power_of_2(markWord::biased_lock_bit_in_place), "adjust this code");
 869       __ tbnz(Rtemp, exact_log2(markWord::biased_lock_bit_in_place), slow_case);
 870     }
 871 
 872     __ bics(Rtemp, Rtemp, ~markWord::hash_mask_in_place);
 873     __ mov(R0, AsmOperand(Rtemp, lsr, markWord::hash_shift), ne);
 874     __ bx(LR, ne);
 875 
 876     __ bind(slow_case);
 877   }
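
For reference, the fast path just emitted can be modelled by the stand-alone C++ below. The constant values are illustrative and assume the 32-bit mark word layout (hash:25, age:4, biased_lock:1, lock:2); the real values are the markWord constants used in the assembly above.

    #include <cstdint>

    // Illustrative constants, assuming the 32-bit mark word layout hash:25 age:4 biased_lock:1 lock:2.
    const uint32_t unlocked_value           = 1u;                 // low lock bits 01 => unlocked
    const uint32_t biased_lock_bit_in_place = 1u << 2;
    const uint32_t hash_shift               = 7u;
    const uint32_t hash_mask_in_place       = 0x1ffffffu << hash_shift;

    // Returns the cached identity hash, or 0 when the slow path (a full VM transition) is required.
    uint32_t fast_identity_hash(uint32_t mark) {
      if ((mark & unlocked_value) == 0) return 0;               // tbz: object is locked
      if ((mark & biased_lock_bit_in_place) != 0) return 0;     // tbnz (UseBiasedLocking): object is biased
      return (mark & hash_mask_in_place) >> hash_shift;         // bics + lsr; 0 means "not computed yet"
    }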
 878 
 879   // Bang stack pages
 880   __ arm_stack_overflow_check(stack_size, Rtemp);
 881 
 882   // Setup frame linkage
 883   __ raw_push(FP, LR);
 884   __ mov(FP, SP);
 885   __ sub_slow(SP, SP, stack_size - 2*wordSize);
 886 
 887   int frame_complete = __ pc() - start;
 888 
 889   OopMapSet* oop_maps = new OopMapSet();
 890   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
 891   const int extra_args = is_static ? 2 : 1;
 892   int receiver_offset = -1;
 893   int fp_regs_in_arguments = 0;
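
The stack bang above touches the pages the new frame will occupy so that a stack overflow is taken here rather than at some arbitrary later store; the three instructions after it then establish an ordinary FP/LR frame. Roughly, the frame looks like the sketch below (how the body of the frame is divided into the lock slot, oop-handle area and outgoing-argument area is decided by the stack_slots computation earlier in the function, not shown in this hunk):

    FP + 4   : saved LR
    FP + 0   : caller's FP                          <-- FP  (mov FP, SP after raw_push)
    FP - ... : stack_size - 2*wordSize bytes
               (lock slot, oop handles, spills, outgoing arguments)
    SP + 0   : bottom of the new frame              <-- SP  (after sub_slow)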


1155 
1156   Label slow_lock, slow_lock_biased, lock_done, fast_lock;
1157   if (method->is_synchronized()) {
1158     // The first argument is a handle to sync object (a class or an instance)
1159     __ ldr(sync_obj, Address(R1));
1160     // Remember the handle for the unlocking code
1161     __ mov(sync_handle, R1);
1162 
1163     __ resolve(IS_NOT_NULL, sync_obj);
1164 
1165     if (UseBiasedLocking) {
1166       __ biased_locking_enter(sync_obj, tmp, disp_hdr/*scratched*/, false, Rtemp, lock_done, slow_lock_biased);
1167     }
1168 
1169     const Register mark = tmp;
1170     // On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
1171     // That is acceptable, as either the CAS or the slow-case path is taken in that case.
1172 
1173     __ ldr(mark, Address(sync_obj, oopDesc::mark_offset_in_bytes()));
1174     __ sub(disp_hdr, FP, lock_slot_fp_offset);
1175     __ tst(mark, markWord::unlocked_value);
1176     __ b(fast_lock, ne);
1177 
1178     // Check for recursive lock
1179     // See comments in InterpreterMacroAssembler::lock_object for
1180     // explanations on the fast recursive locking check.
1181     // Check independently the low bits and the distance to SP
1182     // -1- test low 2 bits
1183     __ movs(Rtemp, AsmOperand(mark, lsl, 30));
1184     // -2- test (hdr - SP) if the low two bits are 0
1185     __ sub(Rtemp, mark, SP, eq);
1186     __ movs(Rtemp, AsmOperand(Rtemp, lsr, exact_log2(os::vm_page_size())), eq);
1187     // If still 'eq' then recursive locking OK: set displaced header to 0
1188     __ str(Rtemp, Address(disp_hdr, BasicLock::displaced_header_offset_in_bytes()), eq);
1189     __ b(lock_done, eq);
1190     __ b(slow_lock);
1191 
1192     __ bind(fast_lock);
1193     __ str(mark, Address(disp_hdr, BasicLock::displaced_header_offset_in_bytes()));
1194 
1195     __ cas_for_lock_acquire(mark, disp_hdr, sync_obj, Rtemp, slow_lock);




 735       receiver_reg = r->as_Register();
 736     }
 737   }
 738 
 739   // Figure out which address we are really jumping to:
 740   MethodHandles::generate_method_handle_dispatch(masm, iid,
 741                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
 742 }
 743 
 744 // ---------------------------------------------------------------------------
 745 // Generate a native wrapper for a given method.  The method takes arguments
 746 // in the Java compiled code convention, marshals them to the native
 747 // convention (handlizes oops, etc), transitions to native, makes the call,
 748 // returns to java state (possibly blocking), unhandlizes any result and
 749 // returns.
 750 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 751                                                 const methodHandle& method,
 752                                                 int compile_id,
 753                                                 BasicType* in_sig_bt,
 754                                                 VMRegPair* in_regs,
 755                                                 BasicType ret_type) {

 756   if (method->is_method_handle_intrinsic()) {
 757     vmIntrinsics::ID iid = method->intrinsic_id();
 758     intptr_t start = (intptr_t)__ pc();
 759     int vep_offset = ((intptr_t)__ pc()) - start;
 760     gen_special_dispatch(masm,
 761                          method,
 762                          in_sig_bt,
 763                          in_regs);
 764     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
 765     __ flush();
 766     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
 767     return nmethod::new_native_nmethod(method,
 768                                        compile_id,
 769                                        masm->code(),
 770                                        vep_offset,
 771                                        frame_complete,
 772                                        stack_slots / VMRegImpl::slots_per_word,
 773                                        in_ByteSize(-1),
 774                                        in_ByteSize(-1),
 775                                        (OopMapSet*)NULL);


 843   int vep_offset = __ pc() - start;
 844 
 845 
 846   if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
 847     // Object.hashCode and System.identityHashCode can pull the hashCode from the header
 848     // word, once it has been computed, instead of doing a full VM transition.
 849     Label slow_case;
 850     const Register obj_reg = R0;
 851 
 852     // Unlike Object.hashCode, System.identityHashCode is a static method and gets the
 853     // object as an argument instead of as the receiver.
 854     if (method->intrinsic_id() == vmIntrinsics::_identityHashCode) {
 855       assert(method->is_static(), "method should be static");
 856       // return 0 for null reference input, return val = R0 = obj_reg = 0
 857       __ cmp(obj_reg, 0);
 858       __ bx(LR, eq);
 859     }
 860 
 861     __ ldr(Rtemp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 862 
 863     assert(markOopDesc::unlocked_value == 1, "adjust this code");
 864     __ tbz(Rtemp, exact_log2(markOopDesc::unlocked_value), slow_case);
 865 
 866     if (UseBiasedLocking) {
 867       assert(is_power_of_2(markOopDesc::biased_lock_bit_in_place), "adjust this code");
 868       __ tbnz(Rtemp, exact_log2(markOopDesc::biased_lock_bit_in_place), slow_case);
 869     }
 870 
 871     __ bics(Rtemp, Rtemp, ~markOopDesc::hash_mask_in_place);
 872     __ mov(R0, AsmOperand(Rtemp, lsr, markOopDesc::hash_shift), ne);
 873     __ bx(LR, ne);
 874 
 875     __ bind(slow_case);
 876   }
 877 
 878   // Bang stack pages
 879   __ arm_stack_overflow_check(stack_size, Rtemp);
 880 
 881   // Setup frame linkage
 882   __ raw_push(FP, LR);
 883   __ mov(FP, SP);
 884   __ sub_slow(SP, SP, stack_size - 2*wordSize);
 885 
 886   int frame_complete = __ pc() - start;
 887 
 888   OopMapSet* oop_maps = new OopMapSet();
 889   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
 890   const int extra_args = is_static ? 2 : 1;
 891   int receiver_offset = -1;
 892   int fp_regs_in_arguments = 0;


1154 
1155   Label slow_lock, slow_lock_biased, lock_done, fast_lock;
1156   if (method->is_synchronized()) {
1157     // The first argument is a handle to sync object (a class or an instance)
1158     __ ldr(sync_obj, Address(R1));
1159     // Remember the handle for the unlocking code
1160     __ mov(sync_handle, R1);
1161 
1162     __ resolve(IS_NOT_NULL, sync_obj);
1163 
1164     if (UseBiasedLocking) {
1165       __ biased_locking_enter(sync_obj, tmp, disp_hdr/*scratched*/, false, Rtemp, lock_done, slow_lock_biased);
1166     }
1167 
1168     const Register mark = tmp;
1169     // On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
1170     // That is acceptable, as either the CAS or the slow-case path is taken in that case.
1171 
1172     __ ldr(mark, Address(sync_obj, oopDesc::mark_offset_in_bytes()));
1173     __ sub(disp_hdr, FP, lock_slot_fp_offset);
1174     __ tst(mark, markOopDesc::unlocked_value);
1175     __ b(fast_lock, ne);
1176 
1177     // Check for recursive lock
1178     // See comments in InterpreterMacroAssembler::lock_object for
1179     // explanations on the fast recursive locking check.
1180     // Check independently the low bits and the distance to SP
1181     // -1- test low 2 bits
1182     __ movs(Rtemp, AsmOperand(mark, lsl, 30));
1183     // -2- test (hdr - SP) if the low two bits are 0
1184     __ sub(Rtemp, mark, SP, eq);
1185     __ movs(Rtemp, AsmOperand(Rtemp, lsr, exact_log2(os::vm_page_size())), eq);
1186     // If still 'eq' then recursive locking OK: set displaced header to 0
1187     __ str(Rtemp, Address(disp_hdr, BasicLock::displaced_header_offset_in_bytes()), eq);
1188     __ b(lock_done, eq);
1189     __ b(slow_lock);
1190 
1191     __ bind(fast_lock);
1192     __ str(mark, Address(disp_hdr, BasicLock::displaced_header_offset_in_bytes()));
1193 
1194     __ cas_for_lock_acquire(mark, disp_hdr, sync_obj, Rtemp, slow_lock);

