  stack_slots = 2; // will be adjusted in setup
  OopMap* map = continuation_enter_setup(masm, stack_slots);
  frame_complete = __ pc() - start;

  fill_continuation_entry(masm);

  __ cbnz(c_rarg2, call_thaw);
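  // c_rarg2 holds the isContinue flag: a nonzero value means we are resuming
  // an already-frozen continuation, so we take the thaw path above rather
  // than making the initial resolved call below.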

  const address tr_call = __ trampoline_call(resolve);
  if (tr_call == nullptr) {
    fatal("CodeCache is full at gen_continuation_enter");
  }

  oop_maps->add_gc_map(__ pc() - start, map);
  __ post_call_nop();

  __ b(exit);

  __ bind(call_thaw);

  ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
  __ rt_call(CAST_FROM_FN_PTR(address, StubRoutines::cont_thaw()));
  oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
  ContinuationEntry::_return_pc_offset = __ pc() - start;
  __ post_call_nop();

  __ bind(exit);
  continuation_enter_cleanup(masm);
  __ leave();
  __ ret(lr);

  /// exception handling

  exception_offset = __ pc() - start;
  {
    __ mov(r19, r0); // save return value containing the exception oop in callee-saved r19

    continuation_enter_cleanup(masm);

    __ ldr(c_rarg1, Address(rfp, wordSize)); // return address
    __ authenticate_return_address(c_rarg1);

  // ...
  continuation_enter_cleanup(masm);

  __ bind(pinned); // pinned -- return to caller

  // handle pending exception thrown by freeze
  __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
  Label ok;
  __ cbz(rscratch1, ok);
  __ leave();
  __ lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
  __ br(rscratch1);
  __ bind(ok);

  __ leave();
  __ ret(lr);

  OopMap* map = new OopMap(framesize, 1);
  oop_maps->add_gc_map(the_pc - start, map);
}

void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
  ::continuation_enter_cleanup(masm);
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool has_receiver = false;
  Register receiver_reg = noreg;
  int member_arg_pos = -1;
  Register member_reg = noreg;
  int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
    member_reg = r19; // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else if (iid == vmIntrinsics::_linkToNative) {

  // ...
      __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
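      // On success, cmpxchg_obj_header branches to 'count'; on failure it
      // falls through with the current mark word left in swap_reg (r0),
      // which feeds the recursion test below.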

      // Hmm should this move to the slow path code area???

      // Test if the oopMark is an obvious stack pointer, i.e.,
      //  1) (mark & 3) == 0, and
      //  2) sp <= mark < sp + os::vm_page_size()
      // These tests can be done together by evaluating the expression
      // ((mark - sp) & (3 - os::vm_page_size())), assuming both the
      // stack pointer and the page size have their least significant
      // 2 bits clear.
      // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg

      __ sub(swap_reg, sp, swap_reg);
      __ neg(swap_reg, swap_reg);
      __ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
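      // Worked example of the mask trick (hypothetical addresses, assuming
      // a 4 KiB page): 3 - 4096 == -4093 == 0x...fffff003, so the AND keeps
      // the two low alignment bits plus every bit at or above the page size.
      //   mark == sp + 0x40:   0x40   & 0x...f003 == 0  -> looks like our own
      //                        stack lock; a zero "displaced header" is stored
      //   mark == sp + 0x2000: 0x2000 & 0x...f003 != 0  -> not within a page
      //                        of sp; take the slow path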

      // Save the test result; for the recursive case the result is zero
      __ str(swap_reg, Address(lock_reg, mark_word_offset));
      __ br(Assembler::NE, slow_path_lock);
      __ b(lock_done);
    } else {
      assert(LockingMode == LM_LIGHTWEIGHT, "must be");
      __ lightweight_lock(obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
      __ b(lock_done);
    }
    __ bind(count);
    __ inc_held_monitor_count();
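    // The per-thread held monitor count lets the VM (for example, the
    // virtual-thread freeze code) detect whether this thread currently
    // holds monitors.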

    // Slow path will re-enter here
    __ bind(lock_done);
  }

  // Finally just about ready to make the JNI call

  // Get JNIEnv*, which is the first argument to the native method
  __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));

  // Now set thread in native
  __ mov(rscratch1, _thread_in_native);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);
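  // stlrw is a store-release: it publishes the _thread_in_native state only
  // after all prior writes, so a VM thread that observes the new state also
  // observes everything this thread did before entering native code.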

  __ rt_call(native_func);

  __ bind(native_return);

  // ...
  __ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
  __ br(Assembler::EQ, reguard);
  __ bind(reguard_done);

  // The native result, if any, is live at this point

  // Unlock
  Label unlock_done;
  Label slow_path_unlock;
  if (method->is_synchronized()) {

    // Get locked oop from the handle we passed to jni
    __ ldr(obj_reg, Address(oop_handle_reg, 0));

    Label done, not_recursive;

    if (LockingMode == LM_LEGACY) {
      // Simple recursive lock?
      __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
      __ cbnz(rscratch1, not_recursive);
      __ b(done);
    }

    __ bind(not_recursive);

    // Must save r0 if it is live now because cmpxchg must use it
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      save_native_result(masm, ret_type, stack_slots);
    }

    if (LockingMode == LM_MONITOR) {
      __ b(slow_path_unlock);
    } else if (LockingMode == LM_LEGACY) {
      // get address of the stack lock
      __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
      // get old displaced header
      __ ldr(old_hdr, Address(r0, 0));

      // Atomic swap old header if oop still contains the stack lock
      Label count;
      __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
      __ bind(count);
      __ dec_held_monitor_count();
    } else {
      assert(LockingMode == LM_LIGHTWEIGHT, "");
      __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
    }

    // slow path re-enters here
    __ bind(unlock_done);
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      restore_native_result(masm, ret_type, stack_slots);
    }

    __ bind(done);
  }

  Label dtrace_method_exit, dtrace_method_exit_done;
  {
    uint64_t offset;
    __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
    __ ldrb(rscratch1, Address(rscratch1, offset));
    __ cbnzw(rscratch1, dtrace_method_exit);
    __ bind(dtrace_method_exit_done);
  }

  // ...
  if (EnableJVMCI) {
    _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
    _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
  }
#endif
}

// Number of stack slots between the incoming argument block and the start
// of a new frame. The PROLOG must add this many slots to the stack. The
// EPILOG must remove this many slots. aarch64 needs two words for the
// return address and fp.
// TODO think this is correct but check
uint SharedRuntime::in_preserve_stack_slots() {
  return 4;
}
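
// Note: VMRegImpl::stack_slot_size is 4 bytes, so the two preserved 64-bit
// words (lr and rfp, 16 bytes) correspond to the four 32-bit slots returned
// above.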

uint SharedRuntime::out_preserve_stack_slots() {
  return 0;
}

VMReg SharedRuntime::thread_register() {
  return rthread->as_VMReg();
}

#ifdef COMPILER2
//------------------------------generate_uncommon_trap_blob--------------------
void SharedRuntime::generate_uncommon_trap_blob() {
  // Allocate space for the code
  ResourceMark rm;
  // Set up code generation tools
  CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");

  address start = __ pc();

  // Push self-frame. We get here with a return address in LR
  // and sp should be 16-byte aligned.
  // Push rfp and retaddr by hand.
  __ protect_return_address();
  __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
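  // The pre-indexed stp above decrements sp by 16 bytes and stores the
  // fp/lr pair there in a single instruction, mirroring a compiled
  // prologue's "stp rfp, lr, [sp, #-16]!".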
  // we don't expect an arg reg save area
#ifndef PRODUCT