
src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp


1123   stack_slots = 2; // will be adjusted in setup
1124   OopMap* map = continuation_enter_setup(masm, stack_slots);
1125   frame_complete = __ pc() - start;
1126 
1127   fill_continuation_entry(masm);
1128 
1129   __ cbnz(c_rarg2, call_thaw);
1130 
1131   const address tr_call = __ trampoline_call(resolve);
1132   if (tr_call == nullptr) {
1133     fatal("CodeCache is full at gen_continuation_enter");
1134   }
1135 
1136   oop_maps->add_gc_map(__ pc() - start, map);
1137   __ post_call_nop();
1138 
1139   __ b(exit);
1140 
1141   __ bind(call_thaw);
1142 

1143   __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
1144   oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1145   ContinuationEntry::_return_pc_offset = __ pc() - start;
1146   __ post_call_nop();
1147 
1148   __ bind(exit);
1149   continuation_enter_cleanup(masm);
1150   __ leave();
1151   __ ret(lr);
1152 
1153   /// exception handling
1154 
1155   exception_offset = __ pc() - start;
1156   {
1157       __ mov(r19, r0); // save return value containing the exception oop in callee-saved R19
1158 
1159       continuation_enter_cleanup(masm);
1160 
1161       __ ldr(c_rarg1, Address(rfp, wordSize)); // return address
1162       __ authenticate_return_address(c_rarg1);
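
The two oop_maps->add_gc_map(__ pc() - start, map) calls in the hunk above record an oop map under the code offset of the instruction that follows the call, i.e. the return address the GC sees when it walks a frame stopped in the callee; the thaw path registers map->deep_copy() because the same logical map has to be findable at a second return address. A standalone sketch of that offset-keyed bookkeeping, with invented names rather than HotSpot's real OopMapSet API:

    // Standalone sketch (hypothetical types): oop maps keyed by the code offset
    // of the return address, recorded immediately after each emitted call.
    #include <cstdint>
    #include <map>
    #include <vector>

    struct OopMapSketch {
      std::vector<int> oop_slots;   // frame slots holding oops at this return PC
    };

    struct OopMapSetSketch {
      std::map<std::intptr_t, OopMapSketch> by_pc_offset;

      void add_gc_map(std::intptr_t pc_offset, const OopMapSketch& m) {
        by_pc_offset[pc_offset] = m;            // one map per recorded offset
      }

      const OopMapSketch* find(std::intptr_t return_pc, std::intptr_t code_start) const {
        auto it = by_pc_offset.find(return_pc - code_start);
        return it == by_pc_offset.end() ? nullptr : &it->second;
      }
    };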

1226     continuation_enter_cleanup(masm);
1227 
1228     __ bind(pinned); // pinned -- return to caller
1229 
1230     // handle pending exception thrown by freeze
1231     __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1232     Label ok;
1233     __ cbz(rscratch1, ok);
1234     __ leave();
1235     __ lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
1236     __ br(rscratch1);
1237     __ bind(ok);
1238 
1239     __ leave();
1240     __ ret(lr);
1241 
1242     OopMap* map = new OopMap(framesize, 1);
1243     oop_maps->add_gc_map(the_pc - start, map);
1244 }
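
The pinned path above must not lose an exception raised by the freeze call, so before returning it checks the thread's pending-exception slot and, if it is set, tears the frame down and jumps to the shared forward-exception stub instead of returning normally. A minimal standalone model of that control flow (every name below is invented for illustration, none of it is HotSpot API):

    #include <cstdio>

    // Hypothetical stand-ins for the thread and the shared forwarding stub.
    struct ThreadSketch { void* pending_exception = nullptr; };

    static void forward_exception_entry_sketch(ThreadSketch*) {
      std::puts("unwind and dispatch the pending exception in the caller");
    }

    // Mirrors the pinned return path: either tail-jump into the exception
    // forwarding stub or do an ordinary leave()/ret(lr) back to the caller.
    void return_from_pinned(ThreadSketch* thread) {
      if (thread->pending_exception != nullptr) {
        forward_exception_entry_sketch(thread);   // __ leave(); __ br(forward_exception_entry)
        return;
      }
      // __ leave(); __ ret(lr);
    }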
1245 




1246 static void gen_special_dispatch(MacroAssembler* masm,
1247                                  const methodHandle& method,
1248                                  const BasicType* sig_bt,
1249                                  const VMRegPair* regs) {
1250   verify_oop_args(masm, method, sig_bt, regs);
1251   vmIntrinsics::ID iid = method->intrinsic_id();
1252 
1253   // Now write the args into the outgoing interpreter space
1254   bool     has_receiver   = false;
1255   Register receiver_reg   = noreg;
1256   int      member_arg_pos = -1;
1257   Register member_reg     = noreg;
1258   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1259   if (ref_kind != 0) {
1260     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1261     member_reg = r19;  // known to be free at this point
1262     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1263   } else if (iid == vmIntrinsics::_invokeBasic) {
1264     has_receiver = true;
1265   } else if (iid == vmIntrinsics::_linkToNative) {

1777       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
1778 
1779       // Hmm should this move to the slow path code area???
1780 
1781       // Test if the oopMark is an obvious stack pointer, i.e.,
1782       //  1) (mark & 3) == 0, and
1783       //  2) sp <= mark < sp + os::pagesize()
1784       // These 3 tests can be done by evaluating the following
1785       // expression: ((mark - sp) & (3 - os::vm_page_size())),
1786       // assuming both stack pointer and pagesize have their
1787       // least significant 2 bits clear.
1788       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1789 
1790       __ sub(swap_reg, sp, swap_reg);
1791       __ neg(swap_reg, swap_reg);
1792       __ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
1793 
1794       // Save the test result, for recursive case, the result is zero
1795       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1796       __ br(Assembler::NE, slow_path_lock);

1797     } else {
1798       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1799       __ lightweight_lock(obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);

1800     }
1801     __ bind(count);
1802     __ increment(Address(rthread, JavaThread::held_monitor_count_offset()));
1803 
1804     // Slow path will re-enter here
1805     __ bind(lock_done);
1806   }
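
The masked subtraction above (sub, neg, then ands against 3 - os::vm_page_size()) folds the alignment test and the page-range test into one sequence: the result is zero exactly when the displaced mark is 4-byte aligned and lies within one page starting at sp, i.e. when this is a recursive stack lock already held by the current thread. A standalone sketch of the same predicate (plain C++, not HotSpot code; the page size and stack pointer in main are made-up values for the asserts):

    #include <cassert>
    #include <cstdint>

    // True iff (mark & 3) == 0 and sp <= mark < sp + page_size.
    // With page_size a power of two, (3 - page_size) has the low two bits set
    // plus every bit from the page-size bit upward, so a single AND of
    // (mark - sp) against it checks alignment, lower bound and upper bound.
    bool looks_like_recursive_stack_lock(std::uintptr_t mark,
                                         std::uintptr_t sp,
                                         std::uintptr_t page_size) {
      return ((mark - sp) & (3 - page_size)) == 0;
    }

    int main() {
      const std::uintptr_t page = 4096, sp = 0x7ffc0000;
      assert(looks_like_recursive_stack_lock(sp + 64, sp, page));     // on this page
      assert(!looks_like_recursive_stack_lock(sp + 65, sp, page));    // misaligned
      assert(!looks_like_recursive_stack_lock(sp + page, sp, page));  // past the page
      assert(!looks_like_recursive_stack_lock(sp - 16, sp, page));    // below sp
      return 0;
    }

A zero result is also what str(swap_reg, ...) leaves in the displaced-header slot, and it is exactly what the unlock path later reloads and tests with cbnz(rscratch1, not_recursive) to recognise the recursive case.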
1807 
1808 
1809   // Finally just about ready to make the JNI call
1810 
1811   // get JNIEnv* which is first argument to native
1812   __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1813 
1814   // Now set thread in native
1815   __ mov(rscratch1, _thread_in_native);
1816   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1817   __ stlrw(rscratch1, rscratch2);
1818 
1819   __ rt_call(native_func);
1820 
1821   __ bind(native_return);
1822 

1895   __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
1896   __ br(Assembler::EQ, reguard);
1897   __ bind(reguard_done);
1898 
1899   // native result if any is live
1900 
1901   // Unlock
1902   Label unlock_done;
1903   Label slow_path_unlock;
1904   if (method->is_synchronized()) {
1905 
1906     // Get locked oop from the handle we passed to jni
1907     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1908 
1909     Label done, not_recursive;
1910 
1911     if (LockingMode == LM_LEGACY) {
1912       // Simple recursive lock?
1913       __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1914       __ cbnz(rscratch1, not_recursive);
1915       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1916       __ b(done);
1917     }
1918 
1919     __ bind(not_recursive);
1920 
1921     // Must save r0 if it is live now because cmpxchg must use it
1922     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1923       save_native_result(masm, ret_type, stack_slots);
1924     }
1925 
1926     if (LockingMode == LM_MONITOR) {
1927       __ b(slow_path_unlock);
1928     } else if (LockingMode == LM_LEGACY) {
1929       // get address of the stack lock
1930       __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1931       //  get old displaced header
1932       __ ldr(old_hdr, Address(r0, 0));
1933 
1934       // Atomic swap old header if oop still contains the stack lock
1935       Label count;
1936       __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
1937       __ bind(count);
1938       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1939     } else {
1940       assert(LockingMode == LM_LIGHTWEIGHT, "");
1941       __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
1942       __ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
1943     }
1944 
1945     // slow path re-enters here
1946     __ bind(unlock_done);
1947     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1948       restore_native_result(masm, ret_type, stack_slots);
1949     }
1950 
1951     __ bind(done);
1952   }
1953 
1954   Label dtrace_method_exit, dtrace_method_exit_done;
1955   {
1956     uint64_t offset;
1957     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1958     __ ldrb(rscratch1, Address(rscratch1, offset));
1959     __ cbnzw(rscratch1, dtrace_method_exit);
1960     __ bind(dtrace_method_exit_done);
1961   }
1962 

2531   if (EnableJVMCI) {
2532     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2533     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2534   }
2535 #endif
2536 }
2537 
2538 // Number of stack slots between incoming argument block and the start of
2539 // a new frame.  The PROLOG must add this many slots to the stack.  The
2540 // EPILOG must remove this many slots. aarch64 needs two slots for
2541 // return address and fp.
2542 // TODO think this is correct but check
2543 uint SharedRuntime::in_preserve_stack_slots() {
2544   return 4;
2545 }
2546 
2547 uint SharedRuntime::out_preserve_stack_slots() {
2548   return 0;
2549 }
2550 




2551 #ifdef COMPILER2
2552 //------------------------------generate_uncommon_trap_blob--------------------
2553 void SharedRuntime::generate_uncommon_trap_blob() {
2554   // Allocate space for the code
2555   ResourceMark rm;
2556   // Setup code generation tools
2557   CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
2558   MacroAssembler* masm = new MacroAssembler(&buffer);
2559 
2560   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
2561 
2562   address start = __ pc();
2563 
2564   // Push self-frame.  We get here with a return address in LR
2565   // and sp should be 16 byte aligned
2566   // push rfp and retaddr by hand
2567   __ protect_return_address();
2568   __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
2569   // we don't expect an arg reg save area
2570 #ifndef PRODUCT

--- patched version of the same file follows; the listing above is the pre-patch version ---

1123   stack_slots = 2; // will be adjusted in setup
1124   OopMap* map = continuation_enter_setup(masm, stack_slots);
1125   frame_complete = __ pc() - start;
1126 
1127   fill_continuation_entry(masm);
1128 
1129   __ cbnz(c_rarg2, call_thaw);
1130 
1131   const address tr_call = __ trampoline_call(resolve);
1132   if (tr_call == nullptr) {
1133     fatal("CodeCache is full at gen_continuation_enter");
1134   }
1135 
1136   oop_maps->add_gc_map(__ pc() - start, map);
1137   __ post_call_nop();
1138 
1139   __ b(exit);
1140 
1141   __ bind(call_thaw);
1142 
1143   ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
1144   __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
1145   oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1146   ContinuationEntry::_return_pc_offset = __ pc() - start;
1147   __ post_call_nop();
1148 
1149   __ bind(exit);
1150   continuation_enter_cleanup(masm);
1151   __ leave();
1152   __ ret(lr);
1153 
1154   /// exception handling
1155 
1156   exception_offset = __ pc() - start;
1157   {
1158       __ mov(r19, r0); // save return value containing the exception oop in callee-saved R19
1159 
1160       continuation_enter_cleanup(masm);
1161 
1162       __ ldr(c_rarg1, Address(rfp, wordSize)); // return address
1163       __ authenticate_return_address(c_rarg1);

1227     continuation_enter_cleanup(masm);
1228 
1229     __ bind(pinned); // pinned -- return to caller
1230 
1231     // handle pending exception thrown by freeze
1232     __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1233     Label ok;
1234     __ cbz(rscratch1, ok);
1235     __ leave();
1236     __ lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
1237     __ br(rscratch1);
1238     __ bind(ok);
1239 
1240     __ leave();
1241     __ ret(lr);
1242 
1243     OopMap* map = new OopMap(framesize, 1);
1244     oop_maps->add_gc_map(the_pc - start, map);
1245 }
1246 
1247 void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
1248   ::continuation_enter_cleanup(masm);
1249 }
1250 
1251 static void gen_special_dispatch(MacroAssembler* masm,
1252                                  const methodHandle& method,
1253                                  const BasicType* sig_bt,
1254                                  const VMRegPair* regs) {
1255   verify_oop_args(masm, method, sig_bt, regs);
1256   vmIntrinsics::ID iid = method->intrinsic_id();
1257 
1258   // Now write the args into the outgoing interpreter space
1259   bool     has_receiver   = false;
1260   Register receiver_reg   = noreg;
1261   int      member_arg_pos = -1;
1262   Register member_reg     = noreg;
1263   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1264   if (ref_kind != 0) {
1265     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1266     member_reg = r19;  // known to be free at this point
1267     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1268   } else if (iid == vmIntrinsics::_invokeBasic) {
1269     has_receiver = true;
1270   } else if (iid == vmIntrinsics::_linkToNative) {

1782       __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
1783 
1784       // Hmm should this move to the slow path code area???
1785 
1786       // Test if the oopMark is an obvious stack pointer, i.e.,
1787       //  1) (mark & 3) == 0, and
1788       //  2) sp <= mark < sp + os::pagesize()
1789       // These 3 tests can be done by evaluating the following
1790       // expression: ((mark - sp) & (3 - os::vm_page_size())),
1791       // assuming both stack pointer and pagesize have their
1792       // least significant 2 bits clear.
1793       // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1794 
1795       __ sub(swap_reg, sp, swap_reg);
1796       __ neg(swap_reg, swap_reg);
1797       __ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
1798 
1799       // Save the test result, for recursive case, the result is zero
1800       __ str(swap_reg, Address(lock_reg, mark_word_offset));
1801       __ br(Assembler::NE, slow_path_lock);
1802       __ b(lock_done);
1803     } else {
1804       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
1805       __ lightweight_lock(obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
1806       __ b(lock_done);
1807     }
1808     __ bind(count);
1809     __ inc_held_monitor_count();
1810 
1811     // Slow path will re-enter here
1812     __ bind(lock_done);
1813   }
1814 
1815 
1816   // Finally just about ready to make the JNI call
1817 
1818   // get JNIEnv* which is first argument to native
1819   __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1820 
1821   // Now set thread in native
1822   __ mov(rscratch1, _thread_in_native);
1823   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1824   __ stlrw(rscratch1, rscratch2);
1825 
1826   __ rt_call(native_func);
1827 
1828   __ bind(native_return);
1829 
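
The stlrw used above for the transition to _thread_in_native is a store-release: every store emitted before it (spilled arguments, the lock slot, the JNI handles) has to become visible no later than the new thread state, so a safepoint or handshake poller that reads the state with acquire semantics sees a consistent frame. A small standalone model of that ordering using std::atomic in place of the raw state word (names invented for illustration):

    #include <atomic>

    enum ThreadStateSketch : int {
      thread_in_Java_sketch   = 0,
      thread_in_native_sketch = 1
    };

    struct JavaThreadSketch {
      std::atomic<int> thread_state{thread_in_Java_sketch};
    };

    // Counterpart of: mov rscratch1, _thread_in_native; stlrw rscratch1, [state].
    // The release store orders everything written before the transition ahead
    // of the state change as seen by a reader that uses an acquire load.
    void transition_to_native(JavaThreadSketch* thread) {
      thread->thread_state.store(thread_in_native_sketch, std::memory_order_release);
    }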

1902   __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
1903   __ br(Assembler::EQ, reguard);
1904   __ bind(reguard_done);
1905 
1906   // native result if any is live
1907 
1908   // Unlock
1909   Label unlock_done;
1910   Label slow_path_unlock;
1911   if (method->is_synchronized()) {
1912 
1913     // Get locked oop from the handle we passed to jni
1914     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1915 
1916     Label done, not_recursive;
1917 
1918     if (LockingMode == LM_LEGACY) {
1919       // Simple recursive lock?
1920       __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1921       __ cbnz(rscratch1, not_recursive);

1922       __ b(done);
1923     }
1924 
1925     __ bind(not_recursive);
1926 
1927     // Must save r0 if it is live now because cmpxchg must use it
1928     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1929       save_native_result(masm, ret_type, stack_slots);
1930     }
1931 
1932     if (LockingMode == LM_MONITOR) {
1933       __ b(slow_path_unlock);
1934     } else if (LockingMode == LM_LEGACY) {
1935       // get address of the stack lock
1936       __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1937       //  get old displaced header
1938       __ ldr(old_hdr, Address(r0, 0));
1939 
1940       // Atomic swap old header if oop still contains the stack lock
1941       Label count;
1942       __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
1943       __ bind(count);
1944       __ dec_held_monitor_count();
1945     } else {
1946       assert(LockingMode == LM_LIGHTWEIGHT, "");
1947       __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);

1948     }
1949 
1950     // slow path re-enters here
1951     __ bind(unlock_done);
1952     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1953       restore_native_result(masm, ret_type, stack_slots);
1954     }
1955 
1956     __ bind(done);
1957   }
1958 
1959   Label dtrace_method_exit, dtrace_method_exit_done;
1960   {
1961     uint64_t offset;
1962     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1963     __ ldrb(rscratch1, Address(rscratch1, offset));
1964     __ cbnzw(rscratch1, dtrace_method_exit);
1965     __ bind(dtrace_method_exit_done);
1966   }
1967 

2536   if (EnableJVMCI) {
2537     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
2538     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
2539   }
2540 #endif
2541 }
2542 
2543 // Number of stack slots between incoming argument block and the start of
2544 // a new frame.  The PROLOG must add this many slots to the stack.  The
2545 // EPILOG must remove this many slots. aarch64 needs two slots for
2546 // return address and fp.
2547 // TODO think this is correct but check
2548 uint SharedRuntime::in_preserve_stack_slots() {
2549   return 4;
2550 }
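
The comment above speaks of "two slots for return address and fp" while the function returns 4; the unit is the 32-bit VMReg stack slot, two of which cover each 8-byte word. A small sketch of the arithmetic, where the 4-byte slot size is an assumption stated for illustration rather than quoted from this file:

    // Assumed for illustration: an AArch64 machine word is 8 bytes and a
    // VMRegImpl stack slot is 4 bytes, so each preserved word costs two slots.
    constexpr int word_size_bytes  = 8;
    constexpr int stack_slot_bytes = 4;   // assumed VMRegImpl::stack_slot_size
    constexpr int preserved_words  = 2;   // saved lr and saved rfp

    constexpr int preserved_slots  = preserved_words * (word_size_bytes / stack_slot_bytes);
    static_assert(preserved_slots == 4, "matches SharedRuntime::in_preserve_stack_slots()");

The same 4-byte-slot unit is behind the SimpleRuntimeFrame::framesize % 4 == 0 assertion further down: four slots are 16 bytes, the stack alignment AArch64 requires.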
2551 
2552 uint SharedRuntime::out_preserve_stack_slots() {
2553   return 0;
2554 }
2555 
2556 VMReg SharedRuntime::thread_register() {
2557   return rthread->as_VMReg();
2558 }
2559 
2560 #ifdef COMPILER2
2561 //------------------------------generate_uncommon_trap_blob--------------------
2562 void SharedRuntime::generate_uncommon_trap_blob() {
2563   // Allocate space for the code
2564   ResourceMark rm;
2565   // Setup code generation tools
2566   CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
2567   MacroAssembler* masm = new MacroAssembler(&buffer);
2568 
2569   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
2570 
2571   address start = __ pc();
2572 
2573   // Push self-frame.  We get here with a return address in LR
2574   // and sp should be 16 byte aligned
2575   // push rfp and retaddr by hand
2576   __ protect_return_address();
2577   __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
2578   // we don't expect an arg reg save area
2579 #ifndef PRODUCT