1 /*
  2  * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  3  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 // no precompiled headers
 27 #include "asm/macroAssembler.hpp"
 28 #include "classfile/vmSymbols.hpp"
 29 #include "code/codeCache.hpp"
 30 #include "code/icBuffer.hpp"
 31 #include "code/nativeInst.hpp"
 32 #include "code/vtableStubs.hpp"
 33 #include "interpreter/interpreter.hpp"
 34 #include "jvm.h"
 35 #include "memory/allocation.inline.hpp"
 36 #include "os_share_linux.hpp"
 37 #include "prims/jniFastGetField.hpp"
 38 #include "prims/jvm_misc.hpp"
 39 #include "runtime/arguments.hpp"
 40 #include "runtime/frame.inline.hpp"
 41 #include "runtime/interfaceSupport.inline.hpp"
 42 #include "runtime/java.hpp"
 43 #include "runtime/javaCalls.hpp"
 44 #include "runtime/mutexLocker.hpp"
 45 #include "runtime/osThread.hpp"
 46 #include "runtime/safepointMechanism.hpp"
 47 #include "runtime/sharedRuntime.hpp"
 48 #include "runtime/stubRoutines.hpp"
 49 #include "runtime/thread.inline.hpp"
 50 #include "runtime/timer.hpp"
 51 #include "signals_posix.hpp"
 52 #include "utilities/debug.hpp"
 53 #include "utilities/events.hpp"
 54 #include "utilities/vmError.hpp"
 55 
 56 // put OS-includes here
 57 # include <dlfcn.h>
 58 # include <fpu_control.h>
 59 # include <errno.h>
 60 # include <pthread.h>
 61 # include <signal.h>
 62 # include <stdio.h>
 63 # include <stdlib.h>
 64 # include <sys/mman.h>
 65 # include <sys/resource.h>
 66 # include <sys/socket.h>
 67 # include <sys/stat.h>
 68 # include <sys/time.h>
 69 # include <sys/types.h>
 70 # include <sys/utsname.h>
 71 # include <sys/wait.h>
 72 # include <poll.h>
 73 # include <pwd.h>
 74 # include <ucontext.h>
 75 # include <unistd.h>
 76 
 77 #define REG_LR       1
 78 #define REG_FP       8
 79 
 80 NOINLINE address os::current_stack_pointer() {
 81   return (address)__builtin_frame_address(0);
 82 }
 83 
char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory;
  // the all-ones bit pattern is never a valid mapped address.
  return (char*) -1;
}
 88 
 89 address os::Posix::ucontext_get_pc(const ucontext_t * uc) {
 90   return (address)uc->uc_mcontext.__gregs[REG_PC];
 91 }
 92 
 93 void os::Posix::ucontext_set_pc(ucontext_t * uc, address pc) {
 94   uc->uc_mcontext.__gregs[REG_PC] = (intptr_t)pc;
 95 }
 96 
 97 intptr_t* os::Linux::ucontext_get_sp(const ucontext_t * uc) {
 98   return (intptr_t*)uc->uc_mcontext.__gregs[REG_SP];
 99 }
100 
101 intptr_t* os::Linux::ucontext_get_fp(const ucontext_t * uc) {
102   return (intptr_t*)uc->uc_mcontext.__gregs[REG_FP];
103 }
104 
105 address os::fetch_frame_from_context(const void* ucVoid,
106                                      intptr_t** ret_sp, intptr_t** ret_fp) {
107   address epc;
108   const ucontext_t* uc = (const ucontext_t*)ucVoid;
109 
110   if (uc != NULL) {
111     epc = os::Posix::ucontext_get_pc(uc);
112     if (ret_sp != NULL) {
113       *ret_sp = os::Linux::ucontext_get_sp(uc);
114     }
115     if (ret_fp != NULL) {
116       *ret_fp = os::Linux::ucontext_get_fp(uc);
117     }
118   } else {
119     epc = NULL;
120     if (ret_sp != NULL) {
121       *ret_sp = (intptr_t *)NULL;
122     }
123     if (ret_fp != NULL) {
124       *ret_fp = (intptr_t *)NULL;
125     }
126   }
127 
128   return epc;
129 }
130 
131 frame os::fetch_compiled_frame_from_context(const void* ucVoid) {
132   const ucontext_t* uc = (const ucontext_t*)ucVoid;
133   // In compiled code, the stack banging is performed before RA
134   // has been saved in the frame. RA is live, and SP and FP
135   // belong to the caller.
136   intptr_t* frame_fp = os::Linux::ucontext_get_fp(uc);
137   intptr_t* frame_sp = os::Linux::ucontext_get_sp(uc);
138   address frame_pc = (address)(uc->uc_mcontext.__gregs[REG_LR]
139                          - NativeInstruction::instruction_size);
140   return frame(frame_sp, frame_fp, frame_pc);
141 }
142 
143 frame os::fetch_frame_from_context(const void* ucVoid) {
144   intptr_t* frame_sp = NULL;
145   intptr_t* frame_fp = NULL;
146   address epc = fetch_frame_from_context(ucVoid, &frame_sp, &frame_fp);
147   return frame(frame_sp, frame_fp, epc);
148 }
149 
150 // By default, gcc always saves frame pointer rfp on this stack. This
151 // may get turned off by -fomit-frame-pointer.
152 frame os::get_sender_for_C_frame(frame* fr) {
153   return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
154 }
155 
156 NOINLINE frame os::current_frame() {
157   intptr_t **sender_sp = (intptr_t **)__builtin_frame_address(0);
158   if (sender_sp != NULL) {
159     frame myframe((intptr_t*)os::current_stack_pointer(),
160                   sender_sp[frame::link_offset],
161                   CAST_FROM_FN_PTR(address, os::current_frame));
162     if (os::is_first_C_frame(&myframe)) {
163       // stack is not walkable
164       return frame();
165     } else {
166       return os::get_sender_for_C_frame(&myframe);
167     }
168   } else {
169     ShouldNotReachHere();
170     return frame();
171   }
172 }
173 
174 // Utility functions
// Platform part of HotSpot's POSIX signal handling. Inspects the signal
// number, faulting pc/address and the thread state, and when the trap is
// one the VM knows how to recover from, resumes execution in a stub by
// rewriting the pc saved in the ucontext. Returns true iff the signal was
// fully handled here; false lets the caller continue signal processing.
bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
                                             ucontext_t* uc, JavaThread* thread) {

  // decide if this trap can be handled by a stub
  address stub = NULL;

  address pc = NULL;

  //%note os_trap_1
  if (info != NULL && uc != NULL && thread != NULL) {
    pc = (address) os::Posix::ucontext_get_pc(uc);

    address addr = (address) info->si_addr;

    // Make sure the high order byte is sign extended, as it may be masked away by the hardware.
    if ((uintptr_t(addr) & (uintptr_t(1) << 55)) != 0) {
      addr = address(uintptr_t(addr) | (uintptr_t(0xFF) << 56));
    }

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      // check if fault address is within thread stack
      if (thread->is_in_full_stack(addr)) {
        if (os::Posix::handle_stack_overflow(thread, addr, pc, uc, &stub)) {
          return true; // continue
        }
      }
    }

    if (thread->thread_state() == _thread_in_Java) {
      // Java thread running in Java code => find exception handler if any
      // a fault inside compiled code, the interpreter, or a stub

      // Handle signal from NativeJump::patch_verified_entry().
      if ((sig == SIGILL || sig == SIGTRAP)
          && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
        if (TraceTraps) {
          tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
        }
        // Divert callers of the patched (not-entrant) method to the
        // handle_wrong_method stub for re-resolution.
        stub = SharedRuntime::get_handle_wrong_method_stub();
      } else if (sig == SIGSEGV && SafepointMechanism::is_poll_address((address)info->si_addr)) {
        // Fault on the safepoint polling page: continue in the poll stub.
        stub = SharedRuntime::get_poll_stub(pc);
      } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
        // BugId 4454115: A read from a MappedByteBuffer can fault
        // here if the underlying file has been truncated.
        // Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
        bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
        if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
          // Resume past the faulting access, or at the arraycopy's
          // designated continuation pc, via the unsafe-access stub.
          address next_pc = pc + NativeCall::instruction_size;
          if (is_unsafe_arraycopy) {
            next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
          }
          stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
        }
      } else if (sig == SIGILL && nativeInstruction_at(pc)->is_stop()) {
        // Pull a pointer to the error message out of the instruction
        // stream (it is embedded immediately after the stop instruction).
        const uint64_t *detail_msg_ptr
          = (uint64_t*)(pc + NativeInstruction::instruction_size);
        const char *detail_msg = (const char *)*detail_msg_ptr;
        const char *msg = "stop";
        if (TraceTraps) {
          tty->print_cr("trap: %s: (SIGILL)", msg);
        }

        // End life with a fatal error, message and detail message and the context.
        // Note: no need to do any post-processing here (e.g. signal chaining)
        va_list va_dummy;
        VMError::report_and_die(thread, uc, NULL, 0, msg, detail_msg, va_dummy);
        va_end(va_dummy);

        ShouldNotReachHere();
      } else if (sig == SIGFPE  &&
          (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
        // Divide-by-zero trap (per si_code): raise the implicit
        // arithmetic exception at the matching handler.
        stub =
          SharedRuntime::
          continuation_for_implicit_exception(thread,
                                              pc,
                                              SharedRuntime::
                                              IMPLICIT_DIVIDE_BY_ZERO);
      } else if (sig == SIGSEGV &&
                 MacroAssembler::uses_implicit_null_check((void*)addr)) {
          // Determination of interpreter/vtable stub/compiled code null exception
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }
    } else if ((thread->thread_state() == _thread_in_vm ||
                thread->thread_state() == _thread_in_native) &&
                sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
                thread->doing_unsafe_access()) {
      // Unsafe access fault from VM or native code: skip the faulting
      // instruction (or jump to the copy-routine continuation pc).
      address next_pc = pc + NativeCall::instruction_size;
      if (UnsafeCopyMemory::contains_pc(pc)) {
        next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
      }
      stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if ((sig == SIGSEGV) || (sig == SIGBUS)) {
      address addr_slow = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr_slow != (address)-1) {
        stub = addr_slow;
      }
    }
  }

  if (stub != NULL) {
    // save all thread context in case we need to restore it
    if (thread != NULL) {
      thread->set_saved_exception_pc(pc);
    }

    // Resume execution at the chosen stub once the handler returns.
    os::Posix::ucontext_set_pc(uc, stub);
    return true;
  }

  return false; // Mute compiler
}
295 
// Intentionally empty: no per-thread FPU state setup is performed here.
void os::Linux::init_thread_fpu_state(void) {
}
298 
// No FPU control word is tracked on this platform; always reports 0.
int os::Linux::get_fpu_control_word(void) {
  return 0;
}
302 
// Intentionally a no-op: the fpu_control argument is ignored here.
void os::Linux::set_fpu_control_word(int fpu_control) {
}
305 
306 ////////////////////////////////////////////////////////////////////////////////
307 // thread stack
308 
// Minimum usable stack sizes required to get to user code. Space for
// HotSpot guard pages is added later. All three thread kinds share the
// same 72K minimum on this platform.
size_t os::Posix::_compiler_thread_min_stack_allowed = 72 * K;
size_t os::Posix::_java_thread_min_stack_allowed = 72 * K;
size_t os::Posix::_vm_internal_thread_min_stack_allowed = 72 * K;
314 
315 // return default stack size for thr_type
316 size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
317   // default stack size (compiler thread needs larger stack)
318   size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
319   return s;
320 }
321 
322 /////////////////////////////////////////////////////////////////////////////
323 // helper functions for fatal error handler
324 
// Register names indexed to pair with the ucontext __gregs array as used
// by print_context/print_register_info below: index 0 is the pc, indices
// 1-31 are the integer registers x1-x31 with their ABI mnemonics.
static const char* reg_abi_names[] = {
  "pc",
  "x1(ra)", "x2(sp)", "x3(gp)", "x4(tp)",
  "x5(t0)", "x6(t1)", "x7(t2)",
  "x8(s0)", "x9(s1)",
  "x10(a0)", "x11(a1)", "x12(a2)", "x13(a3)", "x14(a4)", "x15(a5)", "x16(a6)", "x17(a7)",
  "x18(s2)", "x19(s3)", "x20(s4)", "x21(s5)", "x22(s6)", "x23(s7)", "x24(s8)", "x25(s9)", "x26(s10)", "x27(s11)",
  "x28(t3)", "x29(t4)","x30(t5)", "x31(t6)"
};
334 
335 void os::print_context(outputStream *st, const void *context) {
336   if (context == NULL) {
337     return;
338   }
339 
340   const ucontext_t *uc = (const ucontext_t*)context;
341   st->print_cr("Registers:");
342   for (int r = 0; r < 32; r++) {
343     st->print("%-*.*s=", 8, 8, reg_abi_names[r]);
344     print_location(st, uc->uc_mcontext.__gregs[r]);
345   }
346   st->cr();
347 
348   intptr_t *frame_sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
349   st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", p2i(frame_sp));
350   print_hex_dump(st, (address)frame_sp, (address)(frame_sp + 64), sizeof(intptr_t));
351   st->cr();
352 
353   // Note: it may be unsafe to inspect memory near pc. For example, pc may
354   // point to garbage if entry point in an nmethod is corrupted. Leave
355   // this at the end, and hope for the best.
356   address pc = os::Posix::ucontext_get_pc(uc);
357   print_instructions(st, pc, UseRVC ? sizeof(char) : 4/*non-compressed native instruction size*/);
358   st->cr();
359 }
360 
361 void os::print_register_info(outputStream *st, const void *context) {
362   if (context == NULL) {
363     return;
364   }
365 
366   const ucontext_t *uc = (const ucontext_t*)context;
367 
368   st->print_cr("Register to memory mapping:");
369   st->cr();
370 
371   // this is horrendously verbose but the layout of the registers in the
372   // context does not match how we defined our abstract Register set, so
373   // we can't just iterate through the gregs area
374 
375   // this is only for the "general purpose" registers
376 
377   for (int r = 0; r < 32; r++)
378     st->print_cr("%-*.*s=" INTPTR_FORMAT, 8, 8, reg_abi_names[r], (uintptr_t)uc->uc_mcontext.__gregs[r]);
379   st->cr();
380 }
381 
// Intentionally empty: no process-wide FPU configuration is done here.
void os::setup_fpu() {
}
384 
#ifndef PRODUCT
// Debug-only check that the current stack pointer is aligned to
// StackAlignmentInBytes.
void os::verify_stack_alignment() {
  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
}
#endif
390 
int os::extra_bang_size_in_bytes() {
  // No additional stack-bang space is required on this platform.
  return 0;
}
394 
395 extern "C" {
  // Spin-wait hint: nothing special is done here; the return value 0
  // indicates no pause was performed.
  int SpinPause() {
    return 0;
  }
399 
400   void _Copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
401     if (from > to) {
402       const jshort *end = from + count;
403       while (from < end) {
404         *(to++) = *(from++);
405       }
406     } else if (from < to) {
407       const jshort *end = from;
408       from += count - 1;
409       to   += count - 1;
410       while (from >= end) {
411         *(to--) = *(from--);
412       }
413     }
414   }
415   void _Copy_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
416     if (from > to) {
417       const jint *end = from + count;
418       while (from < end) {
419         *(to++) = *(from++);
420       }
421     } else if (from < to) {
422       const jint *end = from;
423       from += count - 1;
424       to   += count - 1;
425       while (from >= end) {
426         *(to--) = *(from--);
427       }
428     }
429   }
430   void _Copy_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
431     if (from > to) {
432       const jlong *end = from + count;
433       while (from < end) {
434         os::atomic_copy64(from++, to++);
435       }
436     } else if (from < to) {
437       const jlong *end = from;
438       from += count - 1;
439       to   += count - 1;
440       while (from >= end) {
441         os::atomic_copy64(from--, to--);
442       }
443     }
444   }
445 
  // Conjoint (possibly overlapping) byte copy; memmove handles overlap.
  void _Copy_arrayof_conjoint_bytes(const HeapWord* from,
                                    HeapWord* to,
                                    size_t    count) {
    memmove(to, from, count);
  }
451   void _Copy_arrayof_conjoint_jshorts(const HeapWord* from,
452                                       HeapWord* to,
453                                       size_t    count) {
454     memmove(to, from, count * 2);
455   }
456   void _Copy_arrayof_conjoint_jints(const HeapWord* from,
457                                     HeapWord* to,
458                                     size_t    count) {
459     memmove(to, from, count * 4);
460   }
461   void _Copy_arrayof_conjoint_jlongs(const HeapWord* from,
462                                      HeapWord* to,
463                                      size_t    count) {
464     memmove(to, from, count * 8);
465   }
466 };