/*
 * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "asm/macroAssembler.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "os_share_linux.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "signals_posix.hpp"
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here
# include <dlfcn.h>
# include <fpu_control.h>
# include <errno.h>
# include <pthread.h>
# include <signal.h>
# include <stdio.h>
# include <stdlib.h>
# include <sys/mman.h>
# include <sys/resource.h>
# include <sys/socket.h>
# include <sys/stat.h>
# include <sys/time.h>
# include <sys/types.h>
# include <sys/utsname.h>
# include <sys/wait.h>
# include <poll.h>
# include <pwd.h>
# include <ucontext.h>
# include <unistd.h>

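// Indices into uc_mcontext.__gregs[]: entry 0 is the pc and entries 1..31
// hold x1..x31, so ra (x1) and the frame pointer s0 (x8) live at these
// slots. REG_PC and REG_SP are provided by <sys/ucontext.h>.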
#define REG_LR       1
#define REG_FP       8

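// NOINLINE guarantees this function gets a frame of its own, so the frame
// address returned by the GCC builtin is a usable approximation of the
// caller's stack pointer.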
NOINLINE address os::current_stack_pointer() {
  return (address)__builtin_frame_address(0);
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory.
  return (char*) -1;
}

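// Accessors for the register state captured in a ucontext_t, e.g. the
// context handed to a signal handler.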
address os::Posix::ucontext_get_pc(const ucontext_t * uc) {
  return (address)uc->uc_mcontext.__gregs[REG_PC];
}

void os::Posix::ucontext_set_pc(ucontext_t * uc, address pc) {
  uc->uc_mcontext.__gregs[REG_PC] = (intptr_t)pc;
}

intptr_t* os::Linux::ucontext_get_sp(const ucontext_t * uc) {
  return (intptr_t*)uc->uc_mcontext.__gregs[REG_SP];
}

intptr_t* os::Linux::ucontext_get_fp(const ucontext_t * uc) {
  return (intptr_t*)uc->uc_mcontext.__gregs[REG_FP];
}

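// Extract pc, sp and fp from a signal context. Either out parameter may be
// NULL if the caller does not need that value.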
address os::fetch_frame_from_context(const void* ucVoid,
                                     intptr_t** ret_sp, intptr_t** ret_fp) {

  address epc;
  const ucontext_t* uc = (const ucontext_t*)ucVoid;

  if (uc != NULL) {
    epc = os::Posix::ucontext_get_pc(uc);
    if (ret_sp != NULL) {
      *ret_sp = os::Linux::ucontext_get_sp(uc);
    }
    if (ret_fp != NULL) {
      *ret_fp = os::Linux::ucontext_get_fp(uc);
    }
  } else {
    epc = NULL;
    if (ret_sp != NULL) {
      *ret_sp = (intptr_t *)NULL;
    }
    if (ret_fp != NULL) {
      *ret_fp = (intptr_t *)NULL;
    }
  }

  return epc;
}

frame os::fetch_compiled_frame_from_context(const void* ucVoid) {
  const ucontext_t* uc = (const ucontext_t*)ucVoid;
  // In compiled code, the stack banging is performed before LR
  // has been saved in the frame. LR is live, and SP and FP
  // belong to the caller.
  intptr_t* frame_fp = os::Linux::ucontext_get_fp(uc);
  intptr_t* frame_sp = os::Linux::ucontext_get_sp(uc);
  address frame_pc = (address)(uc->uc_mcontext.__gregs[REG_LR]
                         - NativeInstruction::instruction_size);
  return frame(frame_sp, frame_fp, frame_pc);
}

frame os::fetch_frame_from_context(const void* ucVoid) {
  intptr_t* frame_sp = NULL;
  intptr_t* frame_fp = NULL;
  address epc = fetch_frame_from_context(ucVoid, &frame_sp, &frame_fp);
  return frame(frame_sp, frame_fp, epc);
}

// By default, gcc always saves the frame pointer rfp on the stack. This
// may get turned off by -fomit-frame-pointer.
frame os::get_sender_for_C_frame(frame* fr) {
  return frame(fr->c_frame_sender_sp(), fr->c_frame_link(), fr->c_frame_sender_pc());
}

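// Build a frame describing current_frame() itself, then return its sender,
// i.e. the frame of our caller.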
NOINLINE frame os::current_frame() {
  intptr_t **sender_sp = (intptr_t **)__builtin_frame_address(0);
  if (sender_sp != NULL) {
    frame myframe((intptr_t*)os::current_stack_pointer(),
                  sender_sp[frame::c_frame_link_offset],
                  CAST_FROM_FN_PTR(address, os::current_frame));
    if (os::is_first_C_frame(&myframe)) {
      // stack is not walkable
      return frame();
    } else {
      return os::get_sender_for_C_frame(&myframe);
    }
  } else {
    ShouldNotReachHere();
    return frame();
  }
}

bool os::is_first_C_frame(frame* fr) {
  // Load up sp, fp, sender sp and sender fp, check for reasonable values.
  // Check usp first, because if that's bad the other accessors may fault
  // on some architectures.  Ditto ufp second, etc.
  uintptr_t fp_align_mask = (uintptr_t)(sizeof(address) - 1);
  // sp on amd64 can be 32-bit aligned.
  uintptr_t sp_align_mask = (uintptr_t)(sizeof(int) - 1);

  uintptr_t usp    = (uintptr_t)fr->sp();
  if ((usp & sp_align_mask) != 0) {
    return true;
  }

  uintptr_t ufp    = (uintptr_t)fr->fp();
  if ((ufp & fp_align_mask) != 0) {
    return true;
  }

  uintptr_t old_sp = (uintptr_t)fr->c_frame_sender_sp();
  if ((old_sp & sp_align_mask) != 0) {
    return true;
  }
  if (old_sp == 0 || old_sp == (uintptr_t)-1) {
    return true;
  }

  uintptr_t old_fp = (uintptr_t)fr->c_frame_link();
  if ((old_fp & fp_align_mask) != 0) {
    return true;
  }
  if (old_fp == 0 || old_fp == (uintptr_t)-1 || old_fp == ufp) {
    return true;
  }

  // stack grows downwards; if old_fp is below current fp or if the stack
  // frame is too large, either the stack is corrupted or fp is not saved
  // on the stack (i.e. on x86, ebp may be used as a general register). The
  // stack is not walkable beyond the current frame.
  if (old_fp < ufp) {
    return true;
  }
  if (old_fp - ufp > 64 * K) {
    return true;
  }

  return false;
}

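// Capture up to 'frames' return addresses from the current native call
// chain, skipping the first 'toSkip' frames. Unused slots in 'stack' are
// NULL-filled; the number of frames actually captured is returned.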
int os::get_native_stack(address* stack, int frames, int toSkip) {
  int frame_idx = 0;
  int num_of_frames = 0;  // number of frames captured
  frame fr = os::current_frame();
  while (fr.pc() && frame_idx < frames) {
    if (toSkip > 0) {
      toSkip--;
    } else {
      stack[frame_idx++] = fr.pc();
    }
    if (fr.fp() == NULL || fr.cb() != NULL ||
        fr.c_frame_sender_pc() == NULL || os::is_first_C_frame(&fr)) {
      break;
    }

    fr = os::get_sender_for_C_frame(&fr);
  }
  num_of_frames = frame_idx;
  for (; frame_idx < frames; frame_idx++) {
    stack[frame_idx] = NULL;
  }

  return num_of_frames;
}

// Utility functions
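// Platform-specific second-level signal handler, called from the shared
// POSIX signal handling code. Returns true if the signal was handled
// (usually by redirecting the faulting thread to a stub via
// ucontext_set_pc); returning false leaves the signal to the generic
// handling in the caller.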
bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
                                             ucontext_t* uc, JavaThread* thread) {

  // decide if this trap can be handled by a stub
  address stub = NULL;

  address pc = NULL;

  //%note os_trap_1
  if (info != NULL && uc != NULL && thread != NULL) {
    pc = (address) os::Posix::ucontext_get_pc(uc);

    address addr = (address) info->si_addr;

    // Make sure the high order byte is sign extended, as it may be masked away by the hardware.
    if ((uintptr_t(addr) & (uintptr_t(1) << 55)) != 0) {
      addr = address(uintptr_t(addr) | (uintptr_t(0xFF) << 56));
    }

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      // check if fault address is within thread stack
      if (thread->is_in_full_stack(addr)) {
        if (os::Posix::handle_stack_overflow(thread, addr, pc, uc, &stub)) {
          return true; // continue
        }
      }
    }

    if (sig == SIGILL && VM_Version::is_checkvext_fault(pc)) {
      os::Posix::ucontext_set_pc(uc, VM_Version::continuation_for_checkvext_fault(pc));
      return true;
    }

    if (thread->thread_state() == _thread_in_Java) {
      // Java thread running in Java code => find exception handler if any
      // a fault inside compiled code, the interpreter, or a stub

      // Handle signal from NativeJump::patch_verified_entry().
      if ((sig == SIGILL || sig == SIGTRAP)
          && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
        if (TraceTraps) {
          tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
        }
        stub = SharedRuntime::get_handle_wrong_method_stub();
      } else if (sig == SIGSEGV && SafepointMechanism::is_poll_address((address)info->si_addr)) {
        stub = SharedRuntime::get_poll_stub(pc);
      } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
        // BugId 4454115: A read from a MappedByteBuffer can fault
        // here if the underlying file has been truncated.
        // Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
        bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
        if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
          address next_pc = pc + NativeCall::instruction_size;
          if (is_unsafe_arraycopy) {
            next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
          }
          stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
        }
      } else if (sig == SIGILL && nativeInstruction_at(pc)->is_stop()) {
        // Pull a pointer to the error message out of the instruction
        // stream.
        const uint64_t *detail_msg_ptr
          = (uint64_t*)(pc + NativeInstruction::instruction_size);
        const char *detail_msg = (const char *)*detail_msg_ptr;
        const char *msg = "stop";
        if (TraceTraps) {
          tty->print_cr("trap: %s: (SIGILL)", msg);
        }

        // End life with a fatal error, message and detail message and the context.
        // Note: no need to do any post-processing here (e.g. signal chaining)
        va_list va_dummy;
        VMError::report_and_die(thread, uc, NULL, 0, msg, detail_msg, va_dummy);
        va_end(va_dummy);

        ShouldNotReachHere();
      } else if (sig == SIGFPE &&
                 (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
        stub = SharedRuntime::continuation_for_implicit_exception(
            thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
      } else if (sig == SIGSEGV &&
                 MacroAssembler::uses_implicit_null_check((void*)addr)) {
        // Determination of interpreter/vtable stub/compiled code null exception
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }
    } else if ((thread->thread_state() == _thread_in_vm ||
                thread->thread_state() == _thread_in_native) &&
               sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
               thread->doing_unsafe_access()) {
      address next_pc = pc + NativeCall::instruction_size;
      if (UnsafeCopyMemory::contains_pc(pc)) {
        next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
      }
      stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if ((sig == SIGSEGV) || (sig == SIGBUS)) {
      address addr_slow = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr_slow != (address)-1) {
        stub = addr_slow;
      }
    }
  }

  if (stub != NULL) {
    // save all thread context in case we need to restore it
    if (thread != NULL) {
      thread->set_saved_exception_pc(pc);
    }

    os::Posix::ucontext_set_pc(uc, stub);
    return true;
  }

  return false; // Mute compiler
}

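// RISC-V has no x87-style FPU control word; floating-point rounding mode
// and exception flags live in the fcsr register, which this port leaves at
// its defaults, so these hooks are no-ops.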
void os::Linux::init_thread_fpu_state(void) {
}

int os::Linux::get_fpu_control_word(void) {
  return 0;
}

void os::Linux::set_fpu_control_word(int fpu_control) {
}


////////////////////////////////////////////////////////////////////////////////
// thread stack

// Minimum usable stack sizes required to get to user code. Space for
// HotSpot guard pages is added later.
size_t os::Posix::_compiler_thread_min_stack_allowed = 72 * K;
size_t os::Posix::_java_thread_min_stack_allowed = 72 * K;
size_t os::Posix::_vm_internal_thread_min_stack_allowed = 72 * K;

// return default stack size for thr_type
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
  // default stack size (compiler thread needs larger stack)
  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
  return s;
}

/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler

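// ABI names for the integer registers, indexed to match
// uc_mcontext.__gregs[]: entry 0 is the pc, entries 1..31 are x1..x31.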
static const char* reg_abi_names[] = {
  "pc",
  "x1(ra)", "x2(sp)", "x3(gp)", "x4(tp)",
  "x5(t0)", "x6(t1)", "x7(t2)",
  "x8(s0)", "x9(s1)",
  "x10(a0)", "x11(a1)", "x12(a2)", "x13(a3)", "x14(a4)", "x15(a5)", "x16(a6)", "x17(a7)",
  "x18(s2)", "x19(s3)", "x20(s4)", "x21(s5)", "x22(s6)", "x23(s7)", "x24(s8)", "x25(s9)", "x26(s10)", "x27(s11)",
  "x28(t3)", "x29(t4)", "x30(t5)", "x31(t6)"
};

void os::print_context(outputStream *st, const void *context) {
  if (context == NULL) {
    return;
  }

  const ucontext_t *uc = (const ucontext_t*)context;
  st->print_cr("Registers:");
  for (int r = 0; r < 32; r++) {
    st->print("%-*.*s=", 8, 8, reg_abi_names[r]);
    print_location(st, uc->uc_mcontext.__gregs[r]);
  }
  st->cr();

  intptr_t *frame_sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", p2i(frame_sp));
  print_hex_dump(st, (address)frame_sp, (address)(frame_sp + 64), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if the entry point of an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = os::Posix::ucontext_get_pc(uc);
  print_instructions(st, pc, sizeof(char));
  st->cr();
}

void os::print_register_info(outputStream *st, const void *context) {
  if (context == NULL) {
    return;
  }

  const ucontext_t *uc = (const ucontext_t*)context;

  st->print_cr("Register to memory mapping:");
  st->cr();

  // Print the "general purpose" registers exactly as they are laid out in
  // the gregs area of the context; reg_abi_names mirrors that layout.

  for (int r = 0; r < 32; r++) {
    st->print_cr("%-*.*s=" INTPTR_FORMAT, 8, 8, reg_abi_names[r], (uintptr_t)uc->uc_mcontext.__gregs[r]);
  }
  st->cr();
}

void os::setup_fpu() {
}

#ifndef PRODUCT
void os::verify_stack_alignment() {
  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
}
#endif

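// No extra space beyond the normal stack bang is needed on RISC-V.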
int os::extra_bang_size_in_bytes() {
  return 0;
}

extern "C" {
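  // Spin-wait hint used by the runtime's spin loops; this port executes no
  // actual pause instruction and returns 0 to report that.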
  int SpinPause() {
    return 0;
  }

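  // Conjoint (potentially overlapping) element-wise copies used when no
  // assembler stub is available. Copying one naturally-aligned element at a
  // time keeps each individual load/store atomic, and the copy direction is
  // chosen so that overlapping ranges are handled correctly.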
  void _Copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
    if (from > to) {
      const jshort *end = from + count;
      while (from < end) {
        *(to++) = *(from++);
      }
    } else if (from < to) {
      const jshort *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end) {
        *(to--) = *(from--);
      }
    }
  }

  void _Copy_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
    if (from > to) {
      const jint *end = from + count;
      while (from < end) {
        *(to++) = *(from++);
      }
    } else if (from < to) {
      const jint *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end) {
        *(to--) = *(from--);
      }
    }
  }
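
  // 64-bit elements must be moved with a single access each so that a
  // concurrent Java reader can never observe a torn jlong;
  // os::atomic_copy64 provides that guarantee.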
  void _Copy_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
    if (from > to) {
      const jlong *end = from + count;
      while (from < end) {
        os::atomic_copy64(from++, to++);
      }
    } else if (from < to) {
      const jlong *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end) {
        os::atomic_copy64(from--, to--);
      }
    }
  }

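  // The arrayof variants copy arrays whose bases are HeapWord-aligned; this
  // port simply defers to memmove, scaling the element count to bytes.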
  void _Copy_arrayof_conjoint_bytes(const HeapWord* from,
                                    HeapWord* to,
                                    size_t    count) {
    memmove(to, from, count);
  }

  void _Copy_arrayof_conjoint_jshorts(const HeapWord* from,
                                      HeapWord* to,
                                      size_t    count) {
    memmove(to, from, count * 2);
  }

  void _Copy_arrayof_conjoint_jints(const HeapWord* from,
                                    HeapWord* to,
                                    size_t    count) {
    memmove(to, from, count * 4);
  }

  void _Copy_arrayof_conjoint_jlongs(const HeapWord* from,
                                     HeapWord* to,
                                     size_t    count) {
    memmove(to, from, count * 8);
  }
};