1 /*
  2  * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  3  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 // no precompiled headers
 27 #include "asm/macroAssembler.hpp"
 28 #include "classfile/vmSymbols.hpp"
 29 #include "code/codeCache.hpp"
 30 #include "code/icBuffer.hpp"
 31 #include "code/nativeInst.hpp"
 32 #include "code/vtableStubs.hpp"
 33 #include "interpreter/interpreter.hpp"
 34 #include "jvm.h"
 35 #include "memory/allocation.inline.hpp"
 36 #include "os_share_linux.hpp"
 37 #include "prims/jniFastGetField.hpp"
 38 #include "prims/jvm_misc.hpp"
 39 #include "runtime/arguments.hpp"
 40 #include "runtime/frame.inline.hpp"
 41 #include "runtime/interfaceSupport.inline.hpp"
 42 #include "runtime/java.hpp"
 43 #include "runtime/javaCalls.hpp"
 44 #include "runtime/mutexLocker.hpp"
 45 #include "runtime/osThread.hpp"
 46 #include "runtime/safepointMechanism.hpp"
 47 #include "runtime/sharedRuntime.hpp"
 48 #include "runtime/stubRoutines.hpp"
 49 #include "runtime/thread.inline.hpp"
 50 #include "runtime/timer.hpp"
 51 #include "signals_posix.hpp"
 52 #include "utilities/debug.hpp"
 53 #include "utilities/events.hpp"
 54 #include "utilities/vmError.hpp"
 55 
 56 // put OS-includes here
 57 # include <dlfcn.h>
 58 # include <fpu_control.h>
 59 # include <errno.h>
 60 # include <pthread.h>
 61 # include <signal.h>
 62 # include <stdio.h>
 63 # include <stdlib.h>
 64 # include <sys/mman.h>
 65 # include <sys/resource.h>
 66 # include <sys/socket.h>
 67 # include <sys/stat.h>
 68 # include <sys/time.h>
 69 # include <sys/types.h>
 70 # include <sys/utsname.h>
 71 # include <sys/wait.h>
 72 # include <poll.h>
 73 # include <pwd.h>
 74 # include <ucontext.h>
 75 # include <unistd.h>
 76 
// Indices into ucontext_t.uc_mcontext.__gregs for registers this file reads
// by role: x1 is the return address (ra) and x8 the frame pointer (s0/fp).
// REG_PC and REG_SP used below are presumably provided by the system
// <sys/ucontext.h> for riscv — they are not defined in this file.
#define REG_LR       1
#define REG_FP       8
 79 
 80 NOINLINE address os::current_stack_pointer() {
 81   return (address)__builtin_frame_address(0);
 82 }
 83 
 84 char* os::non_memory_address_word() {
 85   // Must never look like an address returned by reserve_memory,
 86   return (char*) -1;
 87 }
 88 
 89 address os::Posix::ucontext_get_pc(const ucontext_t * uc) {
 90   return (address)uc->uc_mcontext.__gregs[REG_PC];
 91 }
 92 
 93 void os::Posix::ucontext_set_pc(ucontext_t * uc, address pc) {
 94   uc->uc_mcontext.__gregs[REG_PC] = (intptr_t)pc;
 95 }
 96 
 97 intptr_t* os::Linux::ucontext_get_sp(const ucontext_t * uc) {
 98   return (intptr_t*)uc->uc_mcontext.__gregs[REG_SP];
 99 }
100 
101 intptr_t* os::Linux::ucontext_get_fp(const ucontext_t * uc) {
102   return (intptr_t*)uc->uc_mcontext.__gregs[REG_FP];
103 }
104 
105 address os::fetch_frame_from_context(const void* ucVoid,
106                                      intptr_t** ret_sp, intptr_t** ret_fp) {
107   address epc;
108   const ucontext_t* uc = (const ucontext_t*)ucVoid;
109 
110   if (uc != NULL) {
111     epc = os::Posix::ucontext_get_pc(uc);
112     if (ret_sp != NULL) {
113       *ret_sp = os::Linux::ucontext_get_sp(uc);
114     }
115     if (ret_fp != NULL) {
116       *ret_fp = os::Linux::ucontext_get_fp(uc);
117     }
118   } else {
119     epc = NULL;
120     if (ret_sp != NULL) {
121       *ret_sp = (intptr_t *)NULL;
122     }
123     if (ret_fp != NULL) {
124       *ret_fp = (intptr_t *)NULL;
125     }
126   }
127 
128   return epc;
129 }
130 
131 frame os::fetch_compiled_frame_from_context(const void* ucVoid) {
132   const ucontext_t* uc = (const ucontext_t*)ucVoid;
133   // In compiled code, the stack banging is performed before RA
134   // has been saved in the frame. RA is live, and SP and FP
135   // belong to the caller.
136   intptr_t* frame_fp = os::Linux::ucontext_get_fp(uc);
137   intptr_t* frame_sp = os::Linux::ucontext_get_sp(uc);
138   address frame_pc = (address)(uc->uc_mcontext.__gregs[REG_LR]
139                          - NativeInstruction::instruction_size);
140   return frame(frame_sp, frame_fp, frame_pc);
141 }
142 
143 frame os::fetch_frame_from_context(const void* ucVoid) {
144   intptr_t* frame_sp = NULL;
145   intptr_t* frame_fp = NULL;
146   address epc = fetch_frame_from_context(ucVoid, &frame_sp, &frame_fp);
147   return frame(frame_sp, frame_fp, epc);
148 }
149 
150 // By default, gcc always saves frame pointer rfp on this stack. This
151 // may get turned off by -fomit-frame-pointer.
152 frame os::get_sender_for_C_frame(frame* fr) {
153   return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
154 }
155 
156 NOINLINE frame os::current_frame() {
157   intptr_t **sender_sp = (intptr_t **)__builtin_frame_address(0);
158   if (sender_sp != NULL) {
159     frame myframe((intptr_t*)os::current_stack_pointer(),
160                   sender_sp[frame::link_offset],
161                   CAST_FROM_FN_PTR(address, os::current_frame));
162     if (os::is_first_C_frame(&myframe)) {
163       // stack is not walkable
164       return frame();
165     } else {
166       return os::get_sender_for_C_frame(&myframe);
167     }
168   } else {
169     ShouldNotReachHere();
170     return frame();
171   }
172 }
173 
// Utility functions

// Platform-specific part of the HotSpot signal handler. Decides whether the
// signal 'sig', raised at 'pc' while 'thread' was running, is one the VM
// provokes deliberately (stack-overflow banging, safepoint polls, implicit
// null checks, divide-by-zero, unsafe memory access faults, patched-away
// method entries, MacroAssembler::stop) and, if so, redirects execution to
// the matching runtime stub by rewriting the saved pc in 'uc'.
// Returns true when the signal was handled here; false lets the shared
// POSIX signal code continue with chained/default handling.
bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
                                             ucontext_t* uc, JavaThread* thread) {

  // decide if this trap can be handled by a stub
  address stub = NULL;

  address pc = NULL;

  //%note os_trap_1
  if (info != NULL && uc != NULL && thread != NULL) {
    pc = (address) os::Posix::ucontext_get_pc(uc);

    // Fault address as reported by the kernel.
    address addr = (address) info->si_addr;

    // Make sure the high order byte is sign extended, as it may be masked away by the hardware.
    if ((uintptr_t(addr) & (uintptr_t(1) << 55)) != 0) {
      addr = address(uintptr_t(addr) | (uintptr_t(0xFF) << 56));
    }

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      // check if fault address is within thread stack
      if (thread->is_in_full_stack(addr)) {
        // Shared POSIX code handles guard-page faults and sets 'stub' or
        // consumes the signal entirely.
        if (os::Posix::handle_stack_overflow(thread, addr, pc, uc, &stub)) {
          return true; // continue
        }
      }
    }

    if (thread->thread_state() == _thread_in_Java) {
      // Java thread running in Java code => find exception handler if any
      // a fault inside compiled code, the interpreter, or a stub

      // Handle signal from NativeJump::patch_verified_entry().
      if ((sig == SIGILL || sig == SIGTRAP)
          && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
        if (TraceTraps) {
          tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
        }
        stub = SharedRuntime::get_handle_wrong_method_stub();
      } else if (sig == SIGSEGV && SafepointMechanism::is_poll_address((address)info->si_addr)) {
        // Safepoint poll: the load from the polling page faulted.
        stub = SharedRuntime::get_poll_stub(pc);
      } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
        // BugId 4454115: A read from a MappedByteBuffer can fault
        // here if the underlying file has been truncated.
        // Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
        bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
        if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
          // Resume after the faulting instruction (or at the arraycopy
          // continuation pc) and let the stub raise the Java-level error.
          address next_pc = pc + NativeCall::instruction_size;
          if (is_unsafe_arraycopy) {
            next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
          }
          stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
        }
      } else if (sig == SIGILL && nativeInstruction_at(pc)->is_stop()) {
        // Pull a pointer to the error message out of the instruction
        // stream.
        const uint64_t *detail_msg_ptr
          = (uint64_t*)(pc + NativeInstruction::instruction_size);
        const char *detail_msg = (const char *)*detail_msg_ptr;
        const char *msg = "stop";
        if (TraceTraps) {
          tty->print_cr("trap: %s: (SIGILL)", msg);
        }

        // End life with a fatal error, message and detail message and the context.
        // Note: no need to do any post-processing here (e.g. signal chaining)
        va_list va_dummy;
        VMError::report_and_die(thread, uc, NULL, 0, msg, detail_msg, va_dummy);
        va_end(va_dummy);

        ShouldNotReachHere();
      } else if (sig == SIGFPE  &&
          (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
        // Integer or floating-point division by zero from generated code.
        stub =
          SharedRuntime::
          continuation_for_implicit_exception(thread,
                                              pc,
                                              SharedRuntime::
                                              IMPLICIT_DIVIDE_BY_ZERO);
      } else if (sig == SIGSEGV &&
                 MacroAssembler::uses_implicit_null_check((void*)addr)) {
          // Determination of interpreter/vtable stub/compiled code null exception
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }
    } else if ((thread->thread_state() == _thread_in_vm ||
                thread->thread_state() == _thread_in_native) &&
                sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
                thread->doing_unsafe_access()) {
      // Unsafe access fault taken while in VM/native state: skip past the
      // faulting instruction and hand control to the unsafe-access stub.
      address next_pc = pc + NativeCall::instruction_size;
      if (UnsafeCopyMemory::contains_pc(pc)) {
        next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
      }
      stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if ((sig == SIGSEGV) || (sig == SIGBUS)) {
      address addr_slow = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr_slow != (address)-1) {
        stub = addr_slow;
      }
    }
  }

  if (stub != NULL) {
    // save all thread context in case we need to restore it
    if (thread != NULL) {
      thread->set_saved_exception_pc(pc);
    }

    // Redirect: the stub runs when the signal handler returns.
    os::Posix::ucontext_set_pc(uc, stub);
    return true;
  }

  return false; // Mute compiler
}
295 
// Per-thread FPU initialization — intentionally empty on this port.
void os::Linux::init_thread_fpu_state(void) {
}
298 
// This port does not model an FPU control word; always reports 0.
int os::Linux::get_fpu_control_word(void) {
  return 0;
}
302 
// Intentionally a no-op on this port; 'fpu_control' is ignored.
void os::Linux::set_fpu_control_word(int fpu_control) {
}
305 
306 
307 ////////////////////////////////////////////////////////////////////////////////
308 // thread stack
309 
// Minimum usable stack sizes required to get to user code. Space for
// HotSpot guard pages is added later.
// All three thread kinds share the same 72K minimum on this port.
size_t os::Posix::_compiler_thread_min_stack_allowed = 72 * K;
size_t os::Posix::_java_thread_min_stack_allowed = 72 * K;
size_t os::Posix::_vm_internal_thread_min_stack_allowed = 72 * K;
315 
316 // return default stack size for thr_type
317 size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
318   // default stack size (compiler thread needs larger stack)
319   size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
320   return s;
321 }
322 
323 /////////////////////////////////////////////////////////////////////////////
324 // helper functions for fatal error handler
325 
// Human-readable names for the 32 saved integer context slots, indexed to
// match uc_mcontext.__gregs as printed by print_context/print_register_info
// below: entry 0 is labeled "pc" (the first greg slot), entries 1-31 are
// x1..x31 with their RISC-V ABI aliases.
static const char* reg_abi_names[] = {
  "pc",
  "x1(ra)", "x2(sp)", "x3(gp)", "x4(tp)",
  "x5(t0)", "x6(t1)", "x7(t2)",
  "x8(s0)", "x9(s1)",
  "x10(a0)", "x11(a1)", "x12(a2)", "x13(a3)", "x14(a4)", "x15(a5)", "x16(a6)", "x17(a7)",
  "x18(s2)", "x19(s3)", "x20(s4)", "x21(s5)", "x22(s6)", "x23(s7)", "x24(s8)", "x25(s9)", "x26(s10)", "x27(s11)",
  "x28(t3)", "x29(t4)","x30(t5)", "x31(t6)"
};
335 
336 void os::print_context(outputStream *st, const void *context) {
337   if (context == NULL) {
338     return;
339   }
340 
341   const ucontext_t *uc = (const ucontext_t*)context;
342   st->print_cr("Registers:");
343   for (int r = 0; r < 32; r++) {
344     st->print("%-*.*s=", 8, 8, reg_abi_names[r]);
345     print_location(st, uc->uc_mcontext.__gregs[r]);
346   }
347   st->cr();
348 
349   intptr_t *frame_sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
350   st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", p2i(frame_sp));
351   print_hex_dump(st, (address)frame_sp, (address)(frame_sp + 64), sizeof(intptr_t));
352   st->cr();
353 
354   // Note: it may be unsafe to inspect memory near pc. For example, pc may
355   // point to garbage if entry point in an nmethod is corrupted. Leave
356   // this at the end, and hope for the best.
357   address pc = os::Posix::ucontext_get_pc(uc);
358   print_instructions(st, pc, sizeof(char));
359   st->cr();
360 }
361 
362 void os::print_register_info(outputStream *st, const void *context) {
363   if (context == NULL) {
364     return;
365   }
366 
367   const ucontext_t *uc = (const ucontext_t*)context;
368 
369   st->print_cr("Register to memory mapping:");
370   st->cr();
371 
372   // this is horrendously verbose but the layout of the registers in the
373   // context does not match how we defined our abstract Register set, so
374   // we can't just iterate through the gregs area
375 
376   // this is only for the "general purpose" registers
377 
378   for (int r = 0; r < 32; r++)
379     st->print_cr("%-*.*s=" INTPTR_FORMAT, 8, 8, reg_abi_names[r], (uintptr_t)uc->uc_mcontext.__gregs[r]);
380   st->cr();
381 }
382 
// Process-level FPU setup — intentionally empty on this port.
void os::setup_fpu() {
}
385 
386 #ifndef PRODUCT
387 void os::verify_stack_alignment() {
388   assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
389 }
390 #endif
391 
// Extra space to bang beyond the normal stack-bang distance; this port
// requires none.
int os::extra_bang_size_in_bytes() {
  return 0;
}
395 
396 extern "C" {
  // Spin-wait hint for contended locks. Always returns 0 here —
  // presumably signaling to callers that no pause was performed; verify
  // against the shared SpinPause call sites.
  int SpinPause() {
    return 0;
  }
400 
401   void _Copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
402     if (from > to) {
403       const jshort *end = from + count;
404       while (from < end) {
405         *(to++) = *(from++);
406       }
407     } else if (from < to) {
408       const jshort *end = from;
409       from += count - 1;
410       to   += count - 1;
411       while (from >= end) {
412         *(to--) = *(from--);
413       }
414     }
415   }
416   void _Copy_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
417     if (from > to) {
418       const jint *end = from + count;
419       while (from < end) {
420         *(to++) = *(from++);
421       }
422     } else if (from < to) {
423       const jint *end = from;
424       from += count - 1;
425       to   += count - 1;
426       while (from >= end) {
427         *(to--) = *(from--);
428       }
429     }
430   }
431   void _Copy_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
432     if (from > to) {
433       const jlong *end = from + count;
434       while (from < end) {
435         os::atomic_copy64(from++, to++);
436       }
437     } else if (from < to) {
438       const jlong *end = from;
439       from += count - 1;
440       to   += count - 1;
441       while (from >= end) {
442         os::atomic_copy64(from--, to--);
443       }
444     }
445   }
446 
447   void _Copy_arrayof_conjoint_bytes(const HeapWord* from,
448                                     HeapWord* to,
449                                     size_t    count) {
450     memmove(to, from, count);
451   }
452   void _Copy_arrayof_conjoint_jshorts(const HeapWord* from,
453                                       HeapWord* to,
454                                       size_t    count) {
455     memmove(to, from, count * 2);
456   }
457   void _Copy_arrayof_conjoint_jints(const HeapWord* from,
458                                     HeapWord* to,
459                                     size_t    count) {
460     memmove(to, from, count * 4);
461   }
462   void _Copy_arrayof_conjoint_jlongs(const HeapWord* from,
463                                      HeapWord* to,
464                                      size_t    count) {
465     memmove(to, from, count * 8);
466   }
467 };