/*
 * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2022 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "jvm.h"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvmtifiles/jvmti.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_linux.inline.hpp"
#include "os_posix.inline.hpp"
#include "os_share_linux.hpp"
#include "osContainer_linux.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "signals_posix.hpp"
#include "semaphore_posix.hpp"
#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
#include "utilities/align.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/elfFile.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here
# include <sys/types.h>
# include <sys/mman.h>
# include <sys/stat.h>
# include <sys/select.h>
# include <pthread.h>
# include <signal.h>
# include <endian.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>
# include <pthread.h>
# include <sys/stat.h>
# include <sys/time.h>
# include <sys/times.h>
# include <sys/utsname.h>
# include <sys/socket.h>
# include <pwd.h>
# include <poll.h>
# include <fcntl.h>
# include <string.h>
# include <syscall.h>
# include <sys/sysinfo.h>
# include <sys/ipc.h>
# include <sys/shm.h>
# include <link.h>
# include <stdint.h>
# include <inttypes.h>
# include <sys/ioctl.h>
# include <linux/elf-em.h>
#ifdef __GLIBC__
# include <malloc.h>
#endif

#ifndef _GNU_SOURCE
  #define _GNU_SOURCE
  #include <sched.h>
  #undef _GNU_SOURCE
#else
  #include <sched.h>
#endif

// if RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
// getrusage() is prepared to handle the associated failure.
#ifndef RUSAGE_THREAD
  #define RUSAGE_THREAD   (1)               /* only the calling thread */
#endif

#define MAX_PATH    (2 * K)

#define MAX_SECS 100000000

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)

#ifdef MUSL_LIBC
// dlvsym is not a part of POSIX
// and musl libc doesn't implement it.
static void *dlvsym(void *handle,
                    const char *symbol,
                    const char *version) {
   // load the latest version of symbol
   return dlsym(handle, symbol);
}
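// Note: glibc's dlvsym() binds a specific version of a symbol, for example
// dlvsym(handle, "sem_wait", "GLIBC_2.1") (an illustrative version string,
// not one used here), whereas the dlsym() fallback above resolves to the
// default, normally newest, version of the symbol.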
#endif

enum CoredumpFilterBit {
  FILE_BACKED_PVT_BIT = 1 << 2,
  FILE_BACKED_SHARED_BIT = 1 << 3,
  LARGEPAGES_BIT = 1 << 6,
  DAX_SHARED_BIT = 1 << 8
};
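// Each value above corresponds to a bit in /proc/<pid>/coredump_filter (see
// core(5)); setting a bit asks the kernel to include the matching mapping type
// in core dumps, e.g. bit 2 = file-backed private mappings, bit 8 = shared DAX
// mappings.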

////////////////////////////////////////////////////////////////////////////////
// global variables
julong os::Linux::_physical_memory = 0;

address   os::Linux::_initial_thread_stack_bottom = NULL;
uintptr_t os::Linux::_initial_thread_stack_size   = 0;

int (*os::Linux::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
int (*os::Linux::_pthread_setname_np)(pthread_t, const char*) = NULL;
pthread_t os::Linux::_main_thread;
int os::Linux::_page_size = -1;
bool os::Linux::_supports_fast_thread_cpu_time = false;
const char * os::Linux::_libc_version = NULL;
const char * os::Linux::_libpthread_version = NULL;
size_t os::Linux::_default_large_page_size = 0;

#ifdef __GLIBC__
os::Linux::mallinfo_func_t os::Linux::_mallinfo = NULL;
os::Linux::mallinfo2_func_t os::Linux::_mallinfo2 = NULL;
#endif // __GLIBC__

static int clock_tics_per_sec = 100;
// If the VM might have been created on the primordial thread, we need to resolve the
// primordial thread stack bounds and check if the current thread might be the
// primordial thread in places. If we know that the primordial thread is never used,
// such as when the VM was created by one of the standard java launchers, we can
// avoid this.
static bool suppress_primordial_thread_resolution = false;

// utility functions

julong os::available_memory() {
  return Linux::available_memory();
}

julong os::Linux::available_memory() {
  // values in struct sysinfo are "unsigned long"
  struct sysinfo si;
  julong avail_mem;

  if (OSContainer::is_containerized()) {
    jlong mem_limit, mem_usage;
    if ((mem_limit = OSContainer::memory_limit_in_bytes()) < 1) {
      log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", using host value",
                             mem_limit == OSCONTAINER_ERROR ? "failed" : "unlimited", mem_limit);
    }
    if (mem_limit > 0 && (mem_usage = OSContainer::memory_usage_in_bytes()) < 1) {
      log_debug(os, container)("container memory usage failed: " JLONG_FORMAT ", using host value", mem_usage);
    }
    if (mem_limit > 0 && mem_usage > 0) {
      avail_mem = mem_limit > mem_usage ? (julong)mem_limit - (julong)mem_usage : 0;
      log_trace(os)("available container memory: " JULONG_FORMAT, avail_mem);
      return avail_mem;
    }
  }

  sysinfo(&si);
  avail_mem = (julong)si.freeram * si.mem_unit;
  log_trace(os)("available memory: " JULONG_FORMAT, avail_mem);
  return avail_mem;
}

julong os::physical_memory() {
  jlong phys_mem = 0;
  if (OSContainer::is_containerized()) {
    jlong mem_limit;
    if ((mem_limit = OSContainer::memory_limit_in_bytes()) > 0) {
      log_trace(os)("total container memory: " JLONG_FORMAT, mem_limit);
      return mem_limit;
    }
    log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", using host value",
                            mem_limit == OSCONTAINER_ERROR ? "failed" : "unlimited", mem_limit);
  }

  phys_mem = Linux::physical_memory();
  log_trace(os)("total system memory: " JLONG_FORMAT, phys_mem);
  return phys_mem;
}

static uint64_t initial_total_ticks = 0;
static uint64_t initial_steal_ticks = 0;
static bool     has_initial_tick_info = false;

static void next_line(FILE *f) {
  int c;
  do {
    c = fgetc(f);
  } while (c != '\n' && c != EOF);
}

bool os::Linux::get_tick_information(CPUPerfTicks* pticks, int which_logical_cpu) {
  FILE*         fh;
  uint64_t      userTicks, niceTicks, systemTicks, idleTicks;
  // since at least kernel 2.6 : iowait: time waiting for I/O to complete
  // irq: time servicing interrupts; softirq: time servicing softirqs
  uint64_t      iowTicks = 0, irqTicks = 0, sirqTicks = 0;
  // steal (since kernel 2.6.11): time spent in other OS when running in a virtualized environment
  uint64_t      stealTicks = 0;
  // guest (since kernel 2.6.24): time spent running a virtual CPU for guest OS under the
  // control of the Linux kernel
  uint64_t      guestNiceTicks = 0;
  int           logical_cpu = -1;
  const int     required_tickinfo_count = (which_logical_cpu == -1) ? 4 : 5;
  int           n;

  memset(pticks, 0, sizeof(CPUPerfTicks));

  if ((fh = os::fopen("/proc/stat", "r")) == NULL) {
    return false;
  }

  if (which_logical_cpu == -1) {
    n = fscanf(fh, "cpu " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
            UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
            UINT64_FORMAT " " UINT64_FORMAT " ",
            &userTicks, &niceTicks, &systemTicks, &idleTicks,
            &iowTicks, &irqTicks, &sirqTicks,
            &stealTicks, &guestNiceTicks);
  } else {
    // Move to next line
    next_line(fh);

    // Find the line for the requested cpu (might it be faster to just iterate linefeeds?).
    for (int i = 0; i < which_logical_cpu; i++) {
      next_line(fh);
    }

    n = fscanf(fh, "cpu%u " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
               UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
               UINT64_FORMAT " " UINT64_FORMAT " ",
               &logical_cpu, &userTicks, &niceTicks,
               &systemTicks, &idleTicks, &iowTicks, &irqTicks, &sirqTicks,
               &stealTicks, &guestNiceTicks);
  }

  fclose(fh);
  if (n < required_tickinfo_count || logical_cpu != which_logical_cpu) {
    return false;
  }
  pticks->used       = userTicks + niceTicks;
  pticks->usedKernel = systemTicks + irqTicks + sirqTicks;
  pticks->total      = userTicks + niceTicks + systemTicks + idleTicks +
                       iowTicks + irqTicks + sirqTicks + stealTicks + guestNiceTicks;

  if (n > required_tickinfo_count + 3) {
    pticks->steal = stealTicks;
    pticks->has_steal_ticks = true;
  } else {
    pticks->steal = 0;
    pticks->has_steal_ticks = false;
  }

  return true;
}
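
// Illustrative only (not part of this file's API): CPU load over an interval
// can be derived from two snapshots of the tick counters filled in above, e.g.
//
//   CPUPerfTicks t0, t1;
//   os::Linux::get_tick_information(&t0, -1);
//   /* ... some time later ... */
//   os::Linux::get_tick_information(&t1, -1);
//   double load = (double)((t1.used + t1.usedKernel) - (t0.used + t0.usedKernel)) /
//                 (double)(t1.total - t0.total);
//
// The real consumers of this data live elsewhere in the VM.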

#ifndef SYS_gettid
// i386: 224, ia64: 1105, amd64: 186, sparc: 143
  #ifdef __ia64__
    #define SYS_gettid 1105
  #else
    #ifdef __i386__
      #define SYS_gettid 224
    #else
      #ifdef __amd64__
        #define SYS_gettid 186
      #else
        #ifdef __sparc__
          #define SYS_gettid 143
        #else
          #error define gettid for the arch
        #endif
      #endif
    #endif
  #endif
#endif


// pid_t gettid()
//
// Returns the kernel thread id of the currently running thread. Kernel
// thread id is used to access /proc.
pid_t os::Linux::gettid() {
  int rslt = syscall(SYS_gettid);
  assert(rslt != -1, "must be."); // old linuxthreads implementation?
  return (pid_t)rslt;
}
// Most versions of Linux have a bug where the number of processors is
// determined by looking at the /proc file system.  In a chroot environment,
// the system call returns 1.
static bool unsafe_chroot_detected = false;
static const char *unstable_chroot_error = "/proc file system not found.\n"
                     "Java may be unstable running multithreaded in a chroot "
                     "environment on Linux when /proc filesystem is not mounted.";

void os::Linux::initialize_system_info() {
  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
  if (processor_count() == 1) {
    pid_t pid = os::Linux::gettid();
    char fname[32];
    jio_snprintf(fname, sizeof(fname), "/proc/%d", pid);
    FILE *fp = os::fopen(fname, "r");
    if (fp == NULL) {
      unsafe_chroot_detected = true;
    } else {
      fclose(fp);
    }
  }
  _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
  assert(processor_count() > 0, "linux error");
}

void os::init_system_properties_values() {
  // The next steps are taken in the product version:
  //
  // Obtain the JAVA_HOME value from the location of libjvm.so.
  // This library should be located at:
  // <JAVA_HOME>/lib/{client|server}/libjvm.so.
  //
  // If "/jre/lib/" appears at the right place in the path, then we
  // assume libjvm.so is installed in a JDK and we use this path.
  //
  // Otherwise exit with message: "Could not create the Java virtual machine."
  //
  // The following extra steps are taken in the debugging version:
  //
  // If "/jre/lib/" does NOT appear at the right place in the path,
  // then instead of exiting we check for the $JAVA_HOME environment variable.
  //
  // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
  // then we append a fake suffix "hotspot/libjvm.so" to this path so
  // it looks like libjvm.so is installed there
  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
  //
  // Otherwise exit.
  //
  // Important note: if the location of libjvm.so changes this
  // code needs to be changed accordingly.

  // See ld(1):
  //      The linker uses the following search paths to locate required
  //      shared libraries:
  //        1: ...
  //        ...
  //        7: The default directories, normally /lib and /usr/lib.
#ifndef OVERRIDE_LIBPATH
  #if defined(_LP64)
    #define DEFAULT_LIBPATH "/usr/lib64:/lib64:/lib:/usr/lib"
  #else
    #define DEFAULT_LIBPATH "/lib:/usr/lib"
  #endif
#else
  #define DEFAULT_LIBPATH OVERRIDE_LIBPATH
#endif

// Base path of extensions installed on the system.
#define SYS_EXT_DIR     "/usr/java/packages"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the colon and the trailing null are provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /libjvm.so.
    }
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';        // Get rid of /lib.
      }
    }
    Arguments::set_java_home(buf);
    if (!set_boot_path('/', ':')) {
      vm_exit_during_initialization("Failed setting boot class path.", NULL);
    }
  }

  // Where to look for native libraries.
  //
  // Note: Due to a legacy implementation, most of the library path
  // is set in the launcher. This was to accommodate linking restrictions
  // on legacy Linux implementations (which are no longer supported).
  // Eventually, all the library path setting will be done here.
  //
  // However, to prevent the proliferation of improperly built native
  // libraries, the new path component /usr/java/packages is added here.
  // Eventually, all the library path setting will be done here.
  {
    // Get the user setting of LD_LIBRARY_PATH, and prepend it. It
    // should always exist (until the legacy problem cited above is
    // addressed).
    const char *v = ::getenv("LD_LIBRARY_PATH");
    const char *v_colon = ":";
    if (v == NULL) { v = ""; v_colon = ""; }
    // That's +1 for the colon and +1 for the trailing '\0'.
    char *ld_library_path = NEW_C_HEAP_ARRAY(char,
                                             strlen(v) + 1 +
                                             sizeof(SYS_EXT_DIR) + sizeof("/lib/") + sizeof(DEFAULT_LIBPATH) + 1,
                                             mtInternal);
    sprintf(ld_library_path, "%s%s" SYS_EXT_DIR "/lib:" DEFAULT_LIBPATH, v, v_colon);
    Arguments::set_library_path(ld_library_path);
    FREE_C_HEAP_ARRAY(char, ld_library_path);
  }

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef SYS_EXT_DIR
#undef EXTENSIONS_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

//////////////////////////////////////////////////////////////////////////////
// detecting pthread library

void os::Linux::libpthread_init() {
  // Save glibc and pthread version strings.
#if !defined(_CS_GNU_LIBC_VERSION) || \
    !defined(_CS_GNU_LIBPTHREAD_VERSION)
  #error "glibc too old (< 2.3.2)"
#endif

#ifdef MUSL_LIBC
  // confstr() from musl libc returns EINVAL for
  // _CS_GNU_LIBC_VERSION and _CS_GNU_LIBPTHREAD_VERSION
  os::Linux::set_libc_version("musl - unknown");
  os::Linux::set_libpthread_version("musl - unknown");
#else
  size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0);
  assert(n > 0, "cannot retrieve glibc version");
  char *str = (char *)malloc(n, mtInternal);
  confstr(_CS_GNU_LIBC_VERSION, str, n);
  os::Linux::set_libc_version(str);

  n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
  assert(n > 0, "cannot retrieve pthread version");
  str = (char *)malloc(n, mtInternal);
  confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
  os::Linux::set_libpthread_version(str);
#endif
}

/////////////////////////////////////////////////////////////////////////////
// thread stack expansion

// os::Linux::manually_expand_stack() takes care of expanding the thread
// stack. Note that this is normally not needed: pthread stacks are
// allocated with mmap() without MAP_NORESERVE, so the stack is already
// committed and it is not necessary to expand it manually.
//
// Manually expanding the stack was historically needed for LinuxThreads
// thread stacks, which were allocated with mmap(MAP_GROWSDOWN). Nowadays
// it is kept to deal with very rare corner cases:
//
// For one, the user may run the VM on their own implementation of threads
// whose stacks are - like the old LinuxThreads - implemented using
// mmap(MAP_GROWSDOWN).
//
// Also, this code may be needed if the VM is running on the primordial
// thread. Normally we avoid running on the primordial thread; however,
// the user may still invoke the VM on the primordial thread.
//
// The following historical comment describes the details about running
// on a thread stack allocated with mmap(MAP_GROWSDOWN):


// Force Linux kernel to expand current thread stack. If "bottom" is close
// to the stack guard, caller should block all signals.
//
// MAP_GROWSDOWN:
//   A special mmap() flag that is used to implement thread stacks. It tells
//   kernel that the memory region should extend downwards when needed. This
//   allows early versions of LinuxThreads to only mmap the first few pages
//   when creating a new thread. Linux kernel will automatically expand thread
//   stack as needed (on page faults).
//
//   However, because the memory region of a MAP_GROWSDOWN stack can grow on
//   demand, if a page fault happens outside an already mapped MAP_GROWSDOWN
//   region, it's hard to tell if the fault is due to a legitimate stack
//   access or because of reading/writing non-existent memory (e.g. buffer
//   overrun). As a rule, if the fault happens below the current stack pointer,
//   the Linux kernel does not expand the stack; instead, a SIGSEGV is sent to
//   the application (see Linux kernel fault.c).
//
//   This Linux feature can cause SIGSEGV when the VM bangs the thread stack
//   for stack overflow detection.
//
//   Newer versions of LinuxThreads (since glibc-2.2, or, RH-7.x) and NPTL do
//   not use MAP_GROWSDOWN.
//
// To get around the problem and allow stack banging on Linux, we need to
// manually expand the thread stack after receiving the SIGSEGV.
//
// There are two ways to expand the thread stack to address "bottom"; we used
// both of them in the JVM before 1.5:
//   1. adjust the stack pointer first so that it is below "bottom", and then
//      touch "bottom"
//   2. mmap() the page in question
//
// Now that the alternate signal stack is gone, it is harder to use option 2.
// For instance, if the current sp is already near the lower end of page 101,
// and we need to call mmap() to map page 100, it is possible that part of the
// mmap() frame will be placed in page 100. When page 100 is mapped, it is
// zero-filled. That will destroy the mmap() frame and cause the VM to crash.
//
// The following code works by adjusting sp first, then accessing the "bottom"
// page to force a page fault. The Linux kernel will then automatically expand
// the stack mapping.
//
// _expand_stack_to() assumes its frame size is less than page size, which
// should always be true if the function is not inlined.

static void NOINLINE _expand_stack_to(address bottom) {
  address sp;
  size_t size;
  volatile char *p;

  // Adjust bottom to point to the largest address within the same page; this
  // gives us a one-page buffer if alloca() allocates slightly more memory.
  bottom = (address)align_down((uintptr_t)bottom, os::Linux::page_size());
  bottom += os::Linux::page_size() - 1;

  // sp might be slightly above current stack pointer; if that's the case, we
  // will alloca() a little more space than necessary, which is OK. Don't use
  // os::current_stack_pointer(), as its result can be slightly below current
  // stack pointer, causing us to not alloca enough to reach "bottom".
  sp = (address)&sp;

  if (sp > bottom) {
    size = sp - bottom;
    p = (volatile char *)alloca(size);
    assert(p != NULL && p <= (volatile char *)bottom, "alloca problem?");
    p[0] = '\0';
  }
}

void os::Linux::expand_stack_to(address bottom) {
  _expand_stack_to(bottom);
}

bool os::Linux::manually_expand_stack(JavaThread * t, address addr) {
  assert(t!=NULL, "just checking");
  assert(t->osthread()->expanding_stack(), "expand should be set");

  if (t->is_in_usable_stack(addr)) {
    sigset_t mask_all, old_sigset;
    sigfillset(&mask_all);
    pthread_sigmask(SIG_SETMASK, &mask_all, &old_sigset);
    _expand_stack_to(addr);
    pthread_sigmask(SIG_SETMASK, &old_sigset, NULL);
    return true;
  }
  return false;
}

//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads
static void *thread_native_entry(Thread *thread) {

  thread->record_stack_base_and_size();

#ifndef __GLIBC__
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  // This code is not needed anymore in glibc because it has MULTI_PAGE_ALIASING
  // and we did not see any degradation in performance without `alloca()`.
  static int counter = 0;
  int pid = os::current_process_id();
  int random = ((pid ^ counter++) & 7) * 128;
  void *stackmem = alloca(random != 0 ? random : 1); // ensure we allocate > 0
  // Ensure the alloca result is used in a way that prevents the compiler from eliding it.
  *(char *)stackmem = 1;
#endif

  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();
  Monitor* sync = osthread->startThread_lock();

  osthread->set_thread_id(os::current_thread_id());

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }
  // initialize signal mask for this thread
  PosixSignals::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Linux::init_thread_fpu_state();

  // handshaking with parent thread
  {
    MutexLocker ml(sync, Mutex::_no_safepoint_check_flag);

    // notify parent thread
    osthread->set_state(INITIALIZED);
    sync->notify_all();

    // wait until os::start_thread()
    while (osthread->get_state() == INITIALIZED) {
      sync->wait_without_safepoint_check();
    }
  }

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) pthread_self());

  assert(osthread->pthread_id() != 0, "pthread_id was not set as expected");

  // call one more level start routine
  thread->call_run();

  // Note: at this point the thread object may already have deleted itself.
  // Prevent dereferencing it from here on out.
  thread = NULL;

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) pthread_self());

  return 0;
}

// On Linux, glibc places static TLS blocks (for __thread variables) on
// the thread stack. This decreases the stack size actually available
// to threads.
//
// For large static TLS sizes, this may cause threads to malfunction due
// to insufficient stack space. This is a well-known issue in glibc:
// http://sourceware.org/bugzilla/show_bug.cgi?id=11787.
//
// As a workaround, we call a private but assumed-stable glibc function,
// __pthread_get_minstack() to obtain the minstack size and derive the
// static TLS size from it. We then increase the user requested stack
// size by this TLS size.
//
// Due to compatibility concerns, this size adjustment is opt-in and
// controlled via AdjustStackSizeForTLS.
typedef size_t (*GetMinStack)(const pthread_attr_t *attr);

GetMinStack _get_minstack_func = NULL;

static void get_minstack_init() {
  _get_minstack_func =
        (GetMinStack)dlsym(RTLD_DEFAULT, "__pthread_get_minstack");
  log_info(os, thread)("Lookup of __pthread_get_minstack %s",
                       _get_minstack_func == NULL ? "failed" : "succeeded");
}

// Returns the size of the static TLS area glibc puts on thread stacks.
// The value is cached on first use, which occurs when the first thread
// is created during VM initialization.
static size_t get_static_tls_area_size(const pthread_attr_t *attr) {
  size_t tls_size = 0;
  if (_get_minstack_func != NULL) {
    // Obtain the pthread minstack size by calling __pthread_get_minstack.
    size_t minstack_size = _get_minstack_func(attr);

    // Remove non-TLS area size included in minstack size returned
    // by __pthread_get_minstack() to get the static TLS size.
    // In glibc before 2.27, minstack size includes guard_size.
    // In glibc 2.27 and later, guard_size is automatically added
    // to the stack size by pthread_create and is no longer included
    // in minstack size. In both cases, the guard_size is taken into
    // account, so there is no need to adjust the result for that.
    //
    // Although __pthread_get_minstack() is a private glibc function,
    // it is expected to have a stable behavior across future glibc
    // versions while glibc still allocates the static TLS blocks off
    // the stack. Following is glibc 2.28 __pthread_get_minstack():
    //
    // size_t
    // __pthread_get_minstack (const pthread_attr_t *attr)
    // {
    //   return GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN;
    // }
    //
    //
    // The following 'minstack_size > os::vm_page_size() + PTHREAD_STACK_MIN'
    // check is done as a precaution.
    if (minstack_size > (size_t)os::vm_page_size() + PTHREAD_STACK_MIN) {
      tls_size = minstack_size - os::vm_page_size() - PTHREAD_STACK_MIN;
    }
  }

  log_info(os, thread)("Stack size adjustment for TLS is " SIZE_FORMAT,
                       tls_size);
  return tls_size;
}
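
// Illustrative only: with AdjustStackSizeForTLS enabled, a hypothetical request
// for a 512K stack in a process whose static TLS area is 64K would lead
// os::create_thread() below to call pthread_attr_setstacksize() with roughly
// 512K + 64K = 576K, so the usable stack still matches what the caller asked for.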

bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t req_stack_size) {
  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread();
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  // Calculate stack size if it's not specified by caller.
  size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
  // In glibc versions prior to 2.27 the guard size mechanism
  // is not implemented properly. The POSIX standard requires adding
  // the size of the guard pages to the stack size; instead, Linux
  // takes the space out of 'stacksize'. Thus we increase the requested
  // stack_size by the size of the guard pages to mimic proper
  // behaviour. However, be careful not to end up with a size
  // of zero due to overflow. Don't add the guard page in that case.
  size_t guard_size = os::Linux::default_guard_size(thr_type);
  // Configure glibc guard page. Must happen before calling
  // get_static_tls_area_size(), which uses the guard_size.
  pthread_attr_setguardsize(&attr, guard_size);

  size_t stack_adjust_size = 0;
  if (AdjustStackSizeForTLS) {
    // Adjust the stack_size for on-stack TLS - see get_static_tls_area_size().
    stack_adjust_size += get_static_tls_area_size(&attr);
  } else {
    stack_adjust_size += guard_size;
  }

  stack_adjust_size = align_up(stack_adjust_size, os::vm_page_size());
  if (stack_size <= SIZE_MAX - stack_adjust_size) {
    stack_size += stack_adjust_size;
  }
  assert(is_aligned(stack_size, os::vm_page_size()), "stack_size not aligned");

  int status = pthread_attr_setstacksize(&attr, stack_size);
  if (status != 0) {
    // pthread_attr_setstacksize() function can fail
    // if the stack size exceeds a system-imposed limit.
    assert_status(status == EINVAL, status, "pthread_attr_setstacksize");
    log_warning(os, thread)("The %sthread stack size specified is invalid: " SIZE_FORMAT "k",
                            (thr_type == compiler_thread) ? "compiler " : ((thr_type == java_thread) ? "" : "VM "),
                            stack_size / K);
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  ThreadState state;

  {
    ResourceMark rm;
    pthread_t tid;
    int ret = 0;
    int limit = 3;
    do {
      ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);
    } while (ret == EAGAIN && limit-- > 0);

    char buf[64];
    if (ret == 0) {
      log_info(os, thread)("Thread \"%s\" started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
                           thread->name(), (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
    } else {
      log_warning(os, thread)("Failed to start thread \"%s\" - pthread_create failed (%s) for attributes: %s.",
                              thread->name(), os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
      // Log some OS information which might explain why creating the thread failed.
      log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
      LogStream st(Log(os, thread)::info());
      os::Posix::print_rlimit_info(&st);
      os::print_memory_info(&st);
      os::Linux::print_proc_sys_info(&st);
      os::Linux::print_container_info(&st);
    }

    pthread_attr_destroy(&attr);

    if (ret != 0) {
      // Need to clean up stuff we've allocated so far
      thread->set_osthread(NULL);
      delete osthread;
      return false;
    }

    // Store pthread info into the OSThread
    osthread->set_pthread_id(tid);

    // Wait until child thread is either initialized or aborted
    {
      Monitor* sync_with_child = osthread->startThread_lock();
      MutexLocker ml(sync_with_child, Mutex::_no_safepoint_check_flag);
      while ((state = osthread->get_state()) == ALLOCATED) {
        sync_with_child->wait_without_safepoint_check();
      }
    }
  }

  // The thread is returned suspended (in state INITIALIZED),
  // and is started higher up in the call chain
  assert(state == INITIALIZED, "race condition");
  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Linux::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread();

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Linux::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Linux::init_thread_fpu_state();

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  if (os::is_primordial_thread()) {
    // If current thread is primordial thread, its stack is mapped on demand,
    // see notes about MAP_GROWSDOWN. Here we try to force kernel to map
    // the entire stack region to avoid SEGV in stack banging.
    // It is also useful to get around the heap-stack-gap problem on SuSE
    // kernel (see 4821821 for details). We first expand stack to the top
    // of yellow zone, then enable stack yellow zone (order is significant,
    // enabling yellow zone first will crash JVM on SuSE Linux), so there
    // is no gap between the last two virtual memory regions.

    StackOverflow* overflow_state = thread->stack_overflow_state();
    address addr = overflow_state->stack_reserved_zone_base();
    assert(addr != NULL, "initialization problem?");
    assert(overflow_state->stack_available(addr) > 0, "stack guard should not be enabled");

    osthread->set_expanding_stack();
    os::Linux::manually_expand_stack(thread, addr);
    osthread->clear_expanding_stack();
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  PosixSignals::hotspot_sigmask(thread);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) pthread_self());

  return true;
}

void os::pd_start_thread(Thread* thread) {
  OSThread * osthread = thread->osthread();
  assert(osthread->get_state() != INITIALIZED, "just checking");
  Monitor* sync_with_child = osthread->startThread_lock();
  MutexLocker ml(sync_with_child, Mutex::_no_safepoint_check_flag);
  sync_with_child->notify();
}

// Free Linux resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  assert(Thread::current()->osthread() == osthread,
         "os::free_thread but not current thread");

#ifdef ASSERT
  sigset_t current;
  sigemptyset(&current);
  pthread_sigmask(SIG_SETMASK, NULL, &current);
  assert(!sigismember(&current, PosixSignals::SR_signum), "SR signal should not be blocked!");
#endif

  // Restore caller's signal mask
  sigset_t sigmask = osthread->caller_sigmask();
  pthread_sigmask(SIG_SETMASK, &sigmask, NULL);

  delete osthread;
}

//////////////////////////////////////////////////////////////////////////////
// primordial thread

// Check if current thread is the primordial thread, similar to Solaris thr_main.
bool os::is_primordial_thread(void) {
  if (suppress_primordial_thread_resolution) {
    return false;
  }
  char dummy;
  // If called before init complete, thread stack bottom will be null.
  // Can be called if fatal error occurs before initialization.
  if (os::Linux::initial_thread_stack_bottom() == NULL) return false;
  assert(os::Linux::initial_thread_stack_bottom() != NULL &&
         os::Linux::initial_thread_stack_size()   != 0,
         "os::init did not locate primordial thread's stack region");
  if ((address)&dummy >= os::Linux::initial_thread_stack_bottom() &&
      (address)&dummy < os::Linux::initial_thread_stack_bottom() +
                        os::Linux::initial_thread_stack_size()) {
    return true;
  } else {
    return false;
  }
}

// Find the virtual memory area that contains addr
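// Each line of /proc/self/maps begins with "<low>-<high> <perms> ...", e.g.
// (illustrative addresses only) "7f0e8000-7f0ea000 r-xp 00000000 08:01 123 /lib/x.so";
// only the leading address range is parsed here.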
static bool find_vma(address addr, address* vma_low, address* vma_high) {
  FILE *fp = os::fopen("/proc/self/maps", "r");
  if (fp) {
    address low, high;
    while (!feof(fp)) {
      if (fscanf(fp, "%p-%p", &low, &high) == 2) {
        if (low <= addr && addr < high) {
          if (vma_low)  *vma_low  = low;
          if (vma_high) *vma_high = high;
          fclose(fp);
          return true;
        }
      }
      for (;;) {
        int ch = fgetc(fp);
        if (ch == EOF || ch == (int)'\n') break;
      }
    }
    fclose(fp);
  }
  return false;
}

// Locate primordial thread stack. This special handling of primordial thread stack
// is needed because pthread_getattr_np() on most (all?) Linux distros returns
// bogus value for the primordial process thread. While the launcher has created
// the VM in a new thread since JDK 6, we still have to allow for the use of the
// JNI invocation API from a primordial thread.
void os::Linux::capture_initial_stack(size_t max_size) {

  // max_size is either 0 (which means accept OS default for thread stacks) or
  // a user-specified value known to be at least the minimum needed. If we
  // are actually on the primordial thread we can make it appear that we have a
  // smaller max_size stack by inserting the guard pages at that location. But we
  // cannot do anything to emulate a larger stack than what has been provided by
  // the OS or threading library. In fact if we try to use a stack greater than
  // what is set by rlimit then we will crash the hosting process.

  // Maximum stack size is the easy part, get it from RLIMIT_STACK.
  // If this is "unlimited" then it will be a huge value.
  struct rlimit rlim;
  getrlimit(RLIMIT_STACK, &rlim);
  size_t stack_size = rlim.rlim_cur;

  // 6308388: a bug in ld.so will relocate its own .data section to the
  //   lower end of primordial stack; reduce ulimit -s value a little bit
  //   so we won't install guard page on ld.so's data section.
  //   But ensure we don't underflow the stack size - allow 1 page spare
  if (stack_size >= (size_t)(3 * page_size())) {
    stack_size -= 2 * page_size();
  }

  // Try to figure out where the stack base (top) is. This is harder.
  //
  // When an application is started, glibc saves the initial stack pointer in
  // a global variable "__libc_stack_end", which is then used by system
  // libraries. __libc_stack_end should be pretty close to stack top. The
  // variable is available since the very early days. However, because it is
  // a private interface, it could disappear in the future.
  //
  // Linux kernel saves start_stack information in /proc/<pid>/stat. Similar
  // to __libc_stack_end, it is very close to stack top, but isn't the real
  // stack top. Note that /proc may not exist if VM is running as a chroot
  // program, so reading /proc/<pid>/stat could fail. Also the contents of
  // /proc/<pid>/stat could change in the future (though unlikely).
  //
  // We try __libc_stack_end first. If that doesn't work, look for
  // /proc/<pid>/stat. If neither of them works, we use current stack pointer
  // as a hint, which should work well in most cases.

  uintptr_t stack_start;

  // try __libc_stack_end first
  uintptr_t *p = (uintptr_t *)dlsym(RTLD_DEFAULT, "__libc_stack_end");
  if (p && *p) {
    stack_start = *p;
  } else {
    // see if we can get the start_stack field from /proc/self/stat
    FILE *fp;
    int pid;
    char state;
    int ppid;
    int pgrp;
    int session;
    int nr;
    int tpgrp;
    unsigned long flags;
    unsigned long minflt;
    unsigned long cminflt;
    unsigned long majflt;
    unsigned long cmajflt;
    unsigned long utime;
    unsigned long stime;
    long cutime;
    long cstime;
    long prio;
    long nice;
    long junk;
    long it_real;
    uintptr_t start;
    uintptr_t vsize;
    intptr_t rss;
    uintptr_t rsslim;
    uintptr_t scodes;
    uintptr_t ecode;
    int i;

    // Figure out what the primordial thread stack base is. Code is inspired
    // by email from Hans Boehm. /proc/self/stat begins with current pid,
    // followed by command name surrounded by parentheses, state, etc.
    char stat[2048];
    int statlen;

    fp = os::fopen("/proc/self/stat", "r");
    if (fp) {
      statlen = fread(stat, 1, 2047, fp);
      stat[statlen] = '\0';
      fclose(fp);

      // Skip pid and the command string. Note that we could be dealing with
      // weird command names, e.g. user could decide to rename java launcher
      // to "java 1.4.2 :)", then the stat file would look like
      //                1234 (java 1.4.2 :)) R ... ...
      // We don't really need to know the command string, just find the last
      // occurrence of ")" and then start parsing from there. See bug 4726580.
      char * s = strrchr(stat, ')');

      i = 0;
      if (s) {
        // Skip blank chars
        do { s++; } while (s && isspace(*s));

#define _UFM UINTX_FORMAT
#define _DFM INTX_FORMAT

        //                                     1   1   1   1   1   1   1   1   1   1   2   2    2    2    2    2    2    2    2
        //              3  4  5  6  7  8   9   0   1   2   3   4   5   6   7   8   9   0   1    2    3    4    5    6    7    8
        i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld " _UFM _UFM _DFM _UFM _UFM _UFM _UFM,
                   &state,          // 3  %c
                   &ppid,           // 4  %d
                   &pgrp,           // 5  %d
                   &session,        // 6  %d
                   &nr,             // 7  %d
                   &tpgrp,          // 8  %d
                   &flags,          // 9  %lu
                   &minflt,         // 10 %lu
                   &cminflt,        // 11 %lu
                   &majflt,         // 12 %lu
                   &cmajflt,        // 13 %lu
                   &utime,          // 14 %lu
                   &stime,          // 15 %lu
                   &cutime,         // 16 %ld
                   &cstime,         // 17 %ld
                   &prio,           // 18 %ld
                   &nice,           // 19 %ld
                   &junk,           // 20 %ld
                   &it_real,        // 21 %ld
                   &start,          // 22 UINTX_FORMAT
                   &vsize,          // 23 UINTX_FORMAT
                   &rss,            // 24 INTX_FORMAT
                   &rsslim,         // 25 UINTX_FORMAT
                   &scodes,         // 26 UINTX_FORMAT
                   &ecode,          // 27 UINTX_FORMAT
                   &stack_start);   // 28 UINTX_FORMAT
      }

#undef _UFM
#undef _DFM

      if (i != 28 - 2) {
        assert(false, "Bad conversion from /proc/self/stat");
        // product mode - assume we are the primordial thread, good luck in the
        // embedded case.
        warning("Can't detect primordial thread stack location - bad conversion");
        stack_start = (uintptr_t) &rlim;
      }
    } else {
      // For some reason we can't open /proc/self/stat (for example, when running
      // on FreeBSD with a Linux emulator, or inside a chroot). The fallback below
      // should work for most cases, so don't abort:
      warning("Can't detect primordial thread stack location - no /proc/self/stat");
      stack_start = (uintptr_t) &rlim;
    }
  }

  // Now we have a pointer (stack_start) very close to the stack top, the
  // next thing to do is to figure out the exact location of stack top. We
  // can find out the virtual memory area that contains stack_start by
  // reading /proc/self/maps, it should be the last vma in /proc/self/maps,
  // and its upper limit is the real stack top. (again, this would fail if
  // running inside chroot, because /proc may not exist.)

  uintptr_t stack_top;
  address low, high;
  if (find_vma((address)stack_start, &low, &high)) {
    // success, "high" is the true stack top. (ignore "low", because initial
    // thread stack grows on demand, its real bottom is high - RLIMIT_STACK.)
    stack_top = (uintptr_t)high;
  } else {
    // failed, likely because /proc/self/maps does not exist
    warning("Can't detect primordial thread stack location - find_vma failed");
    // best effort: stack_start is normally within a few pages below the real
    // stack top, use it as stack top, and reduce stack size so we won't put
    // guard page outside stack.
    stack_top = stack_start;
    stack_size -= 16 * page_size();
  }

  // stack_top could be partially down the page so align it
  stack_top = align_up(stack_top, page_size());

  // Allowed stack value is minimum of max_size and what we derived from rlimit
  if (max_size > 0) {
    _initial_thread_stack_size = MIN2(max_size, stack_size);
  } else {
    // Accept the rlimit max, but if stack is unlimited then it will be huge, so
    // clamp it at 8MB as we do on Solaris
    _initial_thread_stack_size = MIN2(stack_size, 8*M);
  }
  _initial_thread_stack_size = align_down(_initial_thread_stack_size, page_size());
  _initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size;

  assert(_initial_thread_stack_bottom < (address)stack_top, "overflow!");

  if (log_is_enabled(Info, os, thread)) {
    // See if we seem to be on primordial process thread
    bool primordial = uintptr_t(&rlim) > uintptr_t(_initial_thread_stack_bottom) &&
                      uintptr_t(&rlim) < stack_top;

    log_info(os, thread)("Capturing initial stack in %s thread: req. size: " SIZE_FORMAT "K, actual size: "
                         SIZE_FORMAT "K, top=" INTPTR_FORMAT ", bottom=" INTPTR_FORMAT,
                         primordial ? "primordial" : "user", max_size / K, _initial_thread_stack_size / K,
                         stack_top, intptr_t(_initial_thread_stack_bottom));
  }
}

////////////////////////////////////////////////////////////////////////////////
// time support
double os::elapsedVTime() {
  struct rusage usage;
  int retval = getrusage(RUSAGE_THREAD, &usage);
  if (retval == 0) {
    return (double) (usage.ru_utime.tv_sec + usage.ru_stime.tv_sec) + (double) (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000 * 1000);
  } else {
    // better than nothing, but not much
    return elapsedTime();
  }
}

void os::Linux::fast_thread_clock_init() {
  if (!UseLinuxPosixThreadCPUClocks) {
    return;
  }
  clockid_t clockid;
  struct timespec tp;
  int (*pthread_getcpuclockid_func)(pthread_t, clockid_t *) =
      (int(*)(pthread_t, clockid_t *)) dlsym(RTLD_DEFAULT, "pthread_getcpuclockid");

  // Switch to using fast clocks for thread cpu time if
  // clock_getres() returns a 0 error code.
  // Note that some kernels may support the current thread
  // clock (CLOCK_THREAD_CPUTIME_ID) but not the clocks
  // returned by pthread_getcpuclockid().
  // If the fast POSIX clocks are supported then clock_getres()
  // must return at least tp.tv_sec == 0, which means a resolution
  // better than 1 sec. This is an extra check for reliability.

  if (pthread_getcpuclockid_func &&
      pthread_getcpuclockid_func(_main_thread, &clockid) == 0 &&
      clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) {
    _supports_fast_thread_cpu_time = true;
    _pthread_getcpuclockid = pthread_getcpuclockid_func;
  }
}
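
// Illustrative sketch only (not the exact code used elsewhere in this file):
// once _supports_fast_thread_cpu_time is true, a thread's CPU time can be
// obtained via the cached function pointer roughly like this:
//
//   clockid_t cid;
//   struct timespec ts;
//   if (_pthread_getcpuclockid(pthread_self(), &cid) == 0 &&
//       clock_gettime(cid, &ts) == 0) {
//     jlong cpu_time_ns = (jlong)ts.tv_sec * 1000000000 + ts.tv_nsec;
//   }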

// thread_id is kernel thread id (similar to Solaris LWP id)
intx os::current_thread_id() { return os::Linux::gettid(); }
int os::current_process_id() {
  return ::getpid();
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory (a la java.io.tmpdir).
const char* os::get_temp_directory() { return "/tmp"; }

// check if addr is inside libjvm.so
bool os::address_is_in_vm(address addr) {
  static address libjvm_base_addr;
  Dl_info dlinfo;

  if (libjvm_base_addr == NULL) {
    if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
      libjvm_base_addr = (address)dlinfo.dli_fbase;
    }
    assert(libjvm_base_addr != NULL, "Cannot obtain base address for libjvm");
  }

  if (dladdr((void *)addr, &dlinfo) != 0) {
    if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
  }

  return false;
}

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  Dl_info dlinfo;

  if (dladdr((void*)addr, &dlinfo) != 0) {
    // see if we have a matching symbol
    if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
      if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
        jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
      }
      if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
      return true;
    }
    // no matching symbol so try for just file info
    if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
      if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
                          buf, buflen, offset, dlinfo.dli_fname, demangle)) {
        return true;
      }
    }
  }

  buf[0] = '\0';
  if (offset != NULL) *offset = -1;
  return false;
}

struct _address_to_library_name {
  address addr;          // input : memory address
  size_t  buflen;        //         size of fname
  char*   fname;         // output: library name
  address base;          //         library base addr
};

static int address_to_library_name_callback(struct dl_phdr_info *info,
                                            size_t size, void *data) {
  int i;
  bool found = false;
  address libbase = NULL;
  struct _address_to_library_name * d = (struct _address_to_library_name *)data;

  // iterate through all loadable segments
  for (i = 0; i < info->dlpi_phnum; i++) {
    address segbase = (address)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
    if (info->dlpi_phdr[i].p_type == PT_LOAD) {
      // base address of a library is the lowest address of its loaded
      // segments.
      if (libbase == NULL || libbase > segbase) {
        libbase = segbase;
      }
      // see if 'addr' is within current segment
      if (segbase <= d->addr &&
          d->addr < segbase + info->dlpi_phdr[i].p_memsz) {
        found = true;
      }
    }
  }

1400   // dlpi_name is NULL or empty if the ELF file is the executable; return 0
1401   // so dll_address_to_library_name() can fall through to use dladdr(), which
1402   // can figure out the executable name from argv[0].
1403   if (found && info->dlpi_name && info->dlpi_name[0]) {
1404     d->base = libbase;
1405     if (d->fname) {
1406       jio_snprintf(d->fname, d->buflen, "%s", info->dlpi_name);
1407     }
1408     return 1;
1409   }
1410   return 0;
1411 }
1412 
1413 bool os::dll_address_to_library_name(address addr, char* buf,
1414                                      int buflen, int* offset) {
1415   // buf is not optional, but offset is optional
1416   assert(buf != NULL, "sanity check");
1417 
1418   Dl_info dlinfo;
1419   struct _address_to_library_name data;
1420 
1421   // There is a bug in the old glibc dladdr() implementation: it could resolve
1422   // to the wrong library name if the .so file has a base address != NULL. Here
1423   // we iterate through the program headers of all loaded libraries to find
1424   // out which library 'addr' really belongs to. This workaround can be
1425   // removed once the minimum requirement for glibc is moved to 2.3.x.
1426   data.addr = addr;
1427   data.fname = buf;
1428   data.buflen = buflen;
1429   data.base = NULL;
1430   int rslt = dl_iterate_phdr(address_to_library_name_callback, (void *)&data);
1431 
1432   if (rslt) {
1433     // buf already contains library name
1434     if (offset) *offset = addr - data.base;
1435     return true;
1436   }
1437   if (dladdr((void*)addr, &dlinfo) != 0) {
1438     if (dlinfo.dli_fname != NULL) {
1439       jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1440     }
1441     if (dlinfo.dli_fbase != NULL && offset != NULL) {
1442       *offset = addr - (address)dlinfo.dli_fbase;
1443     }
1444     return true;
1445   }
1446 
1447   buf[0] = '\0';
1448   if (offset) *offset = -1;
1449   return false;
1450 }
1451 
1452 // Loads a .dll/.so and, in case of error, checks whether
1453 // the .dll/.so was built for the same architecture as the
1454 // one Hotspot is running on.
1455 
1456 
1457 // Remember the stack's state. The Linux dynamic linker will change
1458 // the stack to 'executable' at most once, so we must safepoint only once.
1459 bool os::Linux::_stack_is_executable = false;
1460 
1461 // VM operation that loads a library.  This is necessary if stack protection
1462 // of the Java stacks can be lost during loading the library.  If we
1463 // do not stop the Java threads, they can stack overflow before the stacks
1464 // are protected again.
1465 class VM_LinuxDllLoad: public VM_Operation {
1466  private:
1467   const char *_filename;
1468   char *_ebuf;
1469   int _ebuflen;
1470   void *_lib;
1471  public:
1472   VM_LinuxDllLoad(const char *fn, char *ebuf, int ebuflen) :
1473     _filename(fn), _ebuf(ebuf), _ebuflen(ebuflen), _lib(NULL) {}
1474   VMOp_Type type() const { return VMOp_LinuxDllLoad; }
1475   void doit() {
1476     _lib = os::Linux::dll_load_in_vmthread(_filename, _ebuf, _ebuflen);
1477     os::Linux::_stack_is_executable = true;
1478   }
1479   void* loaded_library() { return _lib; }
1480 };
1481 
1482 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1483   void * result = NULL;
1484   bool load_attempted = false;
1485 
1486   log_info(os)("attempting shared library load of %s", filename);
1487 
1488   // Check whether the library to load might change execution rights
1489   // of the stack. If they are changed, the protection of the stack
1490   // guard pages will be lost. We need a safepoint to fix this.
1491   //
1492   // See Linux man page execstack(8) for more info.
1493   if (os::uses_stack_guard_pages() && !os::Linux::_stack_is_executable) {
1494     if (!ElfFile::specifies_noexecstack(filename)) {
1495       if (!is_init_completed()) {
1496         os::Linux::_stack_is_executable = true;
1497         // This is OK - No Java threads have been created yet, and hence no
1498         // stack guard pages to fix.
1499         //
1500         // Dynamic loader will make all stacks executable after
1501         // this function returns, and will not do that again.
1502         assert(Threads::number_of_threads() == 0, "no Java threads should exist yet.");
1503       } else {
1504         warning("You have loaded library %s which might have disabled stack guard. "
1505                 "The VM will try to fix the stack guard now.\n"
1506                 "It's highly recommended that you fix the library with "
1507                 "'execstack -c <libfile>', or link it with '-z noexecstack'.",
1508                 filename);
1509 
1510         JavaThread *jt = JavaThread::current();
1511         if (jt->thread_state() != _thread_in_native) {
1512           // This happens when a compiler thread tries to load a hsdis-<arch>.so file
1513           // that requires ExecStack. Cannot enter a safepoint. Let's give up.
1514           warning("Unable to fix stack guard. Giving up.");
1515         } else {
1516           if (!LoadExecStackDllInVMThread) {
1517             // This is for the case where the DLL has a static
1518             // constructor function that executes JNI code. We cannot
1519             // load such DLLs in the VMThread.
1520             result = os::Linux::dlopen_helper(filename, ebuf, ebuflen);
1521           }
1522 
1523           ThreadInVMfromNative tiv(jt);
1524           debug_only(VMNativeEntryWrapper vew;)
1525 
1526           VM_LinuxDllLoad op(filename, ebuf, ebuflen);
1527           VMThread::execute(&op);
1528           if (LoadExecStackDllInVMThread) {
1529             result = op.loaded_library();
1530           }
1531           load_attempted = true;
1532         }
1533       }
1534     }
1535   }
1536 
1537   if (!load_attempted) {
1538     result = os::Linux::dlopen_helper(filename, ebuf, ebuflen);
1539   }
1540 
1541   if (result != NULL) {
1542     // Successful loading
1543     return result;
1544   }
1545 
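       // dlopen() failed. Read the ELF header of the library ourselves so we can
       // append a more specific diagnostic (e.g. an architecture or word-size
       // mismatch) to the dlerror() message already stored in ebuf.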
1546   Elf32_Ehdr elf_head;
1547   int diag_msg_max_length = ebuflen - strlen(ebuf);
1548   char* diag_msg_buf = ebuf + strlen(ebuf);
1549 
1550   if (diag_msg_max_length == 0) {
1551     // No more space in ebuf for additional diagnostics message
1552     return NULL;
1553   }
1554 
1555 
1556   int file_descriptor = ::open(filename, O_RDONLY | O_NONBLOCK);
1557 
1558   if (file_descriptor < 0) {
1559     // Can't open library, report dlerror() message
1560     return NULL;
1561   }
1562 
1563   bool failed_to_read_elf_head =
1564     (sizeof(elf_head) !=
1565      (::read(file_descriptor, &elf_head, sizeof(elf_head))));
1566 
1567   ::close(file_descriptor);
1568   if (failed_to_read_elf_head) {
1569     // file i/o error - report dlerror() msg
1570     return NULL;
1571   }
1572 
1573   if (elf_head.e_ident[EI_DATA] != LITTLE_ENDIAN_ONLY(ELFDATA2LSB) BIG_ENDIAN_ONLY(ELFDATA2MSB)) {
1574     // handle invalid/out of range endianness values
1575     if (elf_head.e_ident[EI_DATA] == 0 || elf_head.e_ident[EI_DATA] > 2) {
1576       return NULL;
1577     }
1578 
1579 #if defined(VM_LITTLE_ENDIAN)
1580     // VM is LE, shared object BE
1581     elf_head.e_machine = be16toh(elf_head.e_machine);
1582 #else
1583     // VM is BE, shared object LE
1584     elf_head.e_machine = le16toh(elf_head.e_machine);
1585 #endif
1586   }
1587 
1588   typedef struct {
1589     Elf32_Half    code;         // Actual value as defined in elf.h
1590     Elf32_Half    compat_class; // Compatibility of archs at VM's sense
1591     unsigned char elf_class;    // 32 or 64 bit
1592     unsigned char endianness;   // MSB or LSB
1593     char*         name;         // String representation
1594   } arch_t;
1595 
1596 #ifndef EM_AARCH64
1597   #define EM_AARCH64    183               /* ARM AARCH64 */
1598 #endif
1599 #ifndef EM_RISCV
1600   #define EM_RISCV      243               /* RISC-V */
1601 #endif
1602 #ifndef EM_LOONGARCH
1603   #define EM_LOONGARCH  258               /* LoongArch */
1604 #endif
1605 
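       // Machine codes this code knows about, used below to produce a helpful
       // diagnostic when the library was built for a different architecture.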
1606   static const arch_t arch_array[]={
1607     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1608     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1609     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1610     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1611     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1612     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1613     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1614     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1615 #if defined(VM_LITTLE_ENDIAN)
1616     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64 LE"},
1617     {EM_SH,          EM_SH,      ELFCLASS32, ELFDATA2LSB, (char*)"SuperH"},
1618 #else
1619     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1620     {EM_SH,          EM_SH,      ELFCLASS32, ELFDATA2MSB, (char*)"SuperH BE"},
1621 #endif
1622     {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM"},
1623     // we only support 64 bit z architecture
1624     {EM_S390,        EM_S390,    ELFCLASS64, ELFDATA2MSB, (char*)"IBM System/390"},
1625     {EM_ALPHA,       EM_ALPHA,   ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"},
1626     {EM_MIPS_RS3_LE, EM_MIPS_RS3_LE, ELFCLASS32, ELFDATA2LSB, (char*)"MIPSel"},
1627     {EM_MIPS,        EM_MIPS,    ELFCLASS32, ELFDATA2MSB, (char*)"MIPS"},
1628     {EM_PARISC,      EM_PARISC,  ELFCLASS32, ELFDATA2MSB, (char*)"PARISC"},
1629     {EM_68K,         EM_68K,     ELFCLASS32, ELFDATA2MSB, (char*)"M68k"},
1630     {EM_AARCH64,     EM_AARCH64, ELFCLASS64, ELFDATA2LSB, (char*)"AARCH64"},
1631     {EM_RISCV,       EM_RISCV,   ELFCLASS64, ELFDATA2LSB, (char*)"RISC-V"},
1632     {EM_LOONGARCH,   EM_LOONGARCH, ELFCLASS64, ELFDATA2LSB, (char*)"LoongArch"},
1633   };
1634 
1635 #if  (defined IA32)
1636   static  Elf32_Half running_arch_code=EM_386;
1637 #elif   (defined AMD64) || (defined X32)
1638   static  Elf32_Half running_arch_code=EM_X86_64;
1639 #elif  (defined IA64)
1640   static  Elf32_Half running_arch_code=EM_IA_64;
1641 #elif  (defined __sparc) && (defined _LP64)
1642   static  Elf32_Half running_arch_code=EM_SPARCV9;
1643 #elif  (defined __sparc) && (!defined _LP64)
1644   static  Elf32_Half running_arch_code=EM_SPARC;
1645 #elif  (defined __powerpc64__)
1646   static  Elf32_Half running_arch_code=EM_PPC64;
1647 #elif  (defined __powerpc__)
1648   static  Elf32_Half running_arch_code=EM_PPC;
1649 #elif  (defined AARCH64)
1650   static  Elf32_Half running_arch_code=EM_AARCH64;
1651 #elif  (defined ARM)
1652   static  Elf32_Half running_arch_code=EM_ARM;
1653 #elif  (defined S390)
1654   static  Elf32_Half running_arch_code=EM_S390;
1655 #elif  (defined ALPHA)
1656   static  Elf32_Half running_arch_code=EM_ALPHA;
1657 #elif  (defined MIPSEL)
1658   static  Elf32_Half running_arch_code=EM_MIPS_RS3_LE;
1659 #elif  (defined PARISC)
1660   static  Elf32_Half running_arch_code=EM_PARISC;
1661 #elif  (defined MIPS)
1662   static  Elf32_Half running_arch_code=EM_MIPS;
1663 #elif  (defined M68K)
1664   static  Elf32_Half running_arch_code=EM_68K;
1665 #elif  (defined SH)
1666   static  Elf32_Half running_arch_code=EM_SH;
1667 #elif  (defined RISCV)
1668   static  Elf32_Half running_arch_code=EM_RISCV;
1669 #elif  (defined LOONGARCH)
1670   static  Elf32_Half running_arch_code=EM_LOONGARCH;
1671 #else
1672     #error Method os::dll_load requires that one of following is defined:\
1673         AARCH64, ALPHA, ARM, AMD64, IA32, IA64, LOONGARCH, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, RISCV, S390, SH, __sparc
1674 #endif
1675 
1676   // Identify compatibility class for VM's architecture and library's architecture
1677   // Obtain string descriptions for architectures
1678 
1679   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
1680   int running_arch_index=-1;
1681 
1682   for (unsigned int i=0; i < ARRAY_SIZE(arch_array); i++) {
1683     if (running_arch_code == arch_array[i].code) {
1684       running_arch_index    = i;
1685     }
1686     if (lib_arch.code == arch_array[i].code) {
1687       lib_arch.compat_class = arch_array[i].compat_class;
1688       lib_arch.name         = arch_array[i].name;
1689     }
1690   }
1691 
1692   assert(running_arch_index != -1,
1693          "Didn't find running architecture code (running_arch_code) in arch_array");
1694   if (running_arch_index == -1) {
1695     // Even though running architecture detection failed,
1696     // we may still continue and report the dlerror() message.
1697     return NULL;
1698   }
1699 
1700   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1701     if (lib_arch.name != NULL) {
1702       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1703                  " (Possible cause: can't load %s .so on a %s platform)",
1704                  lib_arch.name, arch_array[running_arch_index].name);
1705     } else {
1706       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1707                  " (Possible cause: can't load this .so (machine code=0x%x) on a %s platform)",
1708                  lib_arch.code, arch_array[running_arch_index].name);
1709     }
1710     return NULL;
1711   }
1712 
1713   if (lib_arch.endianness != arch_array[running_arch_index].endianness) {
1714     ::snprintf(diag_msg_buf, diag_msg_max_length-1, " (Possible cause: endianness mismatch)");
1715     return NULL;
1716   }
1717 
1718   // ELF file class/capacity : 0 - invalid, 1 - 32bit, 2 - 64bit
1719   if (lib_arch.elf_class > 2 || lib_arch.elf_class < 1) {
1720     ::snprintf(diag_msg_buf, diag_msg_max_length-1, " (Possible cause: invalid ELF file class)");
1721     return NULL;
1722   }
1723 
1724   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1725     ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1726                " (Possible cause: architecture word width mismatch, can't load %d-bit .so on a %d-bit platform)",
1727                (int) lib_arch.elf_class * 32, arch_array[running_arch_index].elf_class * 32);
1728     return NULL;
1729   }
1730 
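       // No specific incompatibility was detected; return NULL and let the
       // caller report the dlerror() message already stored in ebuf.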
1731   return NULL;
1732 }
1733 
1734 void * os::Linux::dlopen_helper(const char *filename, char *ebuf,
1735                                 int ebuflen) {
1736   void * result = ::dlopen(filename, RTLD_LAZY);
1737   if (result == NULL) {
1738     const char* error_report = ::dlerror();
1739     if (error_report == NULL) {
1740       error_report = "dlerror returned no error description";
1741     }
1742     if (ebuf != NULL && ebuflen > 0) {
1743       ::strncpy(ebuf, error_report, ebuflen-1);
1744       ebuf[ebuflen-1]='\0';
1745     }
1746     Events::log(NULL, "Loading shared library %s failed, %s", filename, error_report);
1747     log_info(os)("shared library load of %s failed, %s", filename, error_report);
1748   } else {
1749     Events::log(NULL, "Loaded shared library %s", filename);
1750     log_info(os)("shared library load of %s was successful", filename);
1751   }
1752   return result;
1753 }
1754 
1755 void * os::Linux::dll_load_in_vmthread(const char *filename, char *ebuf,
1756                                        int ebuflen) {
1757   void * result = NULL;
1758   if (LoadExecStackDllInVMThread) {
1759     result = dlopen_helper(filename, ebuf, ebuflen);
1760   }
1761 
1762   // Since 7019808, libjvm.so is linked with -noexecstack. If the VM loads a
1763   // library that requires an executable stack, or which does not have this
1764   // stack attribute set, dlopen changes the stack attribute to executable. The
1765   // read protection of the guard pages gets lost.
1766   //
1767   // Need to check _stack_is_executable again as multiple VM_LinuxDllLoad
1768   // may have been queued at the same time.
1769 
1770   if (!_stack_is_executable) {
1771     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
1772       StackOverflow* overflow_state = jt->stack_overflow_state();
1773       if (!overflow_state->stack_guard_zone_unused() &&     // Guard zone in use, i.e. stack fully initialized
1774           overflow_state->stack_guards_enabled()) {         // and no stack overflow exception is pending
1775         if (!os::guard_memory((char *)jt->stack_end(), StackOverflow::stack_guard_zone_size())) {
1776           warning("Attempt to reguard stack yellow zone failed.");
1777         }
1778       }
1779     }
1780   }
1781 
1782   return result;
1783 }
1784 
1785 static bool _print_ascii_file(const char* filename, outputStream* st, const char* hdr = NULL) {
1786   int fd = ::open(filename, O_RDONLY);
1787   if (fd == -1) {
1788     return false;
1789   }
1790 
1791   if (hdr != NULL) {
1792     st->print_cr("%s", hdr);
1793   }
1794 
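       // Read the file in small chunks and echo it verbatim to the stream.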
1795   char buf[33];
1796   int bytes;
1797   buf[32] = '\0';
1798   while ((bytes = ::read(fd, buf, sizeof(buf)-1)) > 0) {
1799     st->print_raw(buf, bytes);
1800   }
1801 
1802   ::close(fd);
1803 
1804   return true;
1805 }
1806 
1807 static void _print_ascii_file_h(const char* header, const char* filename, outputStream* st, bool same_line = true) {
1808   st->print("%s:%c", header, same_line ? ' ' : '\n');
1809   if (!_print_ascii_file(filename, st)) {
1810     st->print_cr("<Not Available>");
1811   }
1812 }
1813 
1814 void os::print_dll_info(outputStream *st) {
1815   st->print_cr("Dynamic libraries:");
1816 
1817   char fname[32];
1818   pid_t pid = os::Linux::gettid();
1819 
1820   jio_snprintf(fname, sizeof(fname), "/proc/%d/maps", pid);
1821 
1822   if (!_print_ascii_file(fname, st)) {
1823     st->print_cr("Can not get library information for pid = %d", pid);
1824   }
1825 }
1826 
1827 struct loaded_modules_info_param {
1828   os::LoadedModulesCallbackFunc callback;
1829   void *param;
1830 };
1831 
1832 static int dl_iterate_callback(struct dl_phdr_info *info, size_t size, void *data) {
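       // Skip objects without a pathname (such as the main executable).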
1833   if ((info->dlpi_name == NULL) || (*info->dlpi_name == '\0')) {
1834     return 0;
1835   }
1836 
1837   struct loaded_modules_info_param *callback_param = reinterpret_cast<struct loaded_modules_info_param *>(data);
1838   address base = NULL;
1839   address top = NULL;
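       // Compute the lowest and highest aligned addresses spanned by the
       // module's PT_LOAD segments; they become the module's base and top.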
1840   for (int idx = 0; idx < info->dlpi_phnum; idx++) {
1841     const ElfW(Phdr) *phdr = info->dlpi_phdr + idx;
1842     if (phdr->p_type == PT_LOAD) {
1843       address raw_phdr_base = reinterpret_cast<address>(info->dlpi_addr + phdr->p_vaddr);
1844 
1845       address phdr_base = align_down(raw_phdr_base, phdr->p_align);
1846       if ((base == NULL) || (base > phdr_base)) {
1847         base = phdr_base;
1848       }
1849 
1850       address phdr_top = align_up(raw_phdr_base + phdr->p_memsz, phdr->p_align);
1851       if ((top == NULL) || (top < phdr_top)) {
1852         top = phdr_top;
1853       }
1854     }
1855   }
1856 
1857   return callback_param->callback(info->dlpi_name, base, top, callback_param->param);
1858 }
1859 
1860 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1861   struct loaded_modules_info_param callback_param = {callback, param};
1862   return dl_iterate_phdr(&dl_iterate_callback, &callback_param);
1863 }
1864 
1865 void os::print_os_info_brief(outputStream* st) {
1866   os::Linux::print_distro_info(st);
1867 
1868   os::Posix::print_uname_info(st);
1869 
1870   os::Linux::print_libversion_info(st);
1871 
1872 }
1873 
1874 void os::print_os_info(outputStream* st) {
1875   st->print_cr("OS:");
1876 
1877   os::Linux::print_distro_info(st);
1878 
1879   os::Posix::print_uname_info(st);
1880 
1881   os::Linux::print_uptime_info(st);
1882 
1883   // Print warning if unsafe chroot environment detected
1884   if (unsafe_chroot_detected) {
1885     st->print_cr("WARNING!! %s", unstable_chroot_error);
1886   }
1887 
1888   os::Linux::print_libversion_info(st);
1889 
1890   os::Posix::print_rlimit_info(st);
1891 
1892   os::Posix::print_load_average(st);
1893   st->cr();
1894 
1895   os::Linux::print_system_memory_info(st);
1896   st->cr();
1897 
1898   os::Linux::print_process_memory_info(st);
1899   st->cr();
1900 
1901   os::Linux::print_proc_sys_info(st);
1902   st->cr();
1903 
1904   if (os::Linux::print_ld_preload_file(st)) {
1905     st->cr();
1906   }
1907 
1908   if (os::Linux::print_container_info(st)) {
1909     st->cr();
1910   }
1911 
1912   VM_Version::print_platform_virtualization_info(st);
1913 
1914   os::Linux::print_steal_info(st);
1915 }
1916 
1917 // Try to identify popular distros.
1918 // Most Linux distributions have a /etc/XXX-release file, which contains
1919 // the OS version string. Newer Linux distributions have a /etc/lsb-release
1920 // file that also contains the OS version string. Some have more than one
1921 // /etc/XXX-release file (e.g. Mandrake has both /etc/mandrake-release and
1922 // /etc/redhat-release), so the order is important.
1923 // Any Linux that is based on Red Hat (e.g. Oracle, Mandrake, Sun JDS...) has
1924 // its own specific XXX-release file as well as a redhat-release file.
1925 // Because of this, the XXX-release file needs to be searched for before the
1926 // redhat-release file.
1927 // Since Red Hat and SuSE have an lsb-release file that is not very descriptive,
1928 // the search for redhat-release / SuSE-release needs to come before lsb-release.
1929 // Since the lsb-release file is the new standard, it needs to be searched for
1930 // before the older style release files.
1931 // Searching system-release (Red Hat) and os-release (other Linuxes) is a
1932 // next-to-last resort.  The os-release file is a new standard that contains
1933 // distribution information, while the system-release file seems to be an old
1934 // standard that has been replaced by the lsb-release and os-release files.
1935 // Searching for the debian_version file is the last resort.  It contains
1936 // an informative string like "6.0.6" or "wheezy/sid". Because of this,
1937 // "Debian " is printed before the contents of the debian_version file.
1938 
1939 const char* distro_files[] = {
1940   "/etc/oracle-release",
1941   "/etc/mandriva-release",
1942   "/etc/mandrake-release",
1943   "/etc/sun-release",
1944   "/etc/redhat-release",
1945   "/etc/SuSE-release",
1946   "/etc/lsb-release",
1947   "/etc/turbolinux-release",
1948   "/etc/gentoo-release",
1949   "/etc/ltib-release",
1950   "/etc/angstrom-version",
1951   "/etc/system-release",
1952   "/etc/os-release",
1953   NULL };
1954 
1955 void os::Linux::print_distro_info(outputStream* st) {
1956   for (int i = 0;; i++) {
1957     const char* file = distro_files[i];
1958     if (file == NULL) {
1959       break;  // done
1960     }
1961     // If file prints, we found it.
1962     if (_print_ascii_file(file, st)) {
1963       return;
1964     }
1965   }
1966 
1967   if (file_exists("/etc/debian_version")) {
1968     st->print("Debian ");
1969     _print_ascii_file("/etc/debian_version", st);
1970   } else {
1971     st->print_cr("Linux");
1972   }
1973 }
1974 
1975 static void parse_os_info_helper(FILE* fp, char* distro, size_t length, bool get_first_line) {
1976   char buf[256];
1977   while (fgets(buf, sizeof(buf), fp)) {
1978     // Look for the distro name in the expected format and strip the decoration around it
1979     if (strstr(buf, "DISTRIB_DESCRIPTION=") != NULL || strstr(buf, "PRETTY_NAME=") != NULL) {
1980       char* ptr = strstr(buf, "\"");  // the name is in quotes
1981       if (ptr != NULL) {
1982         ptr++; // go beyond first quote
1983         char* nl = strchr(ptr, '\"');
1984         if (nl != NULL) *nl = '\0';
1985         strncpy(distro, ptr, length);
1986       } else {
1987         ptr = strstr(buf, "=");
1988         ptr++; // go beyond the equals sign
1989         char* nl = strchr(ptr, '\n');
1990         if (nl != NULL) *nl = '\0';
1991         strncpy(distro, ptr, length);
1992       }
1993       return;
1994     } else if (get_first_line) {
1995       char* nl = strchr(buf, '\n');
1996       if (nl != NULL) *nl = '\0';
1997       strncpy(distro, buf, length);
1998       return;
1999     }
2000   }
2001   // expected format not found; fall back to the last line read (the caller closes the file)
2002   char* nl = strchr(buf, '\n');
2003   if (nl != NULL) *nl = '\0';
2004   strncpy(distro, buf, length);
2005 }
2006 
2007 static void parse_os_info(char* distro, size_t length, const char* file) {
2008   FILE* fp = os::fopen(file, "r");
2009   if (fp != NULL) {
2010     // for the SuSE-release format, the distro name is on the first line
2011     bool get_first_line = (strcmp(file, "/etc/SuSE-release") == 0);
2012     parse_os_info_helper(fp, distro, length, get_first_line);
2013     fclose(fp);
2014   }
2015 }
2016 
2017 void os::get_summary_os_info(char* buf, size_t buflen) {
2018   for (int i = 0;; i++) {
2019     const char* file = distro_files[i];
2020     if (file == NULL) {
2021       break; // ran out of distro_files
2022     }
2023     if (file_exists(file)) {
2024       parse_os_info(buf, buflen, file);
2025       return;
2026     }
2027   }
2028   // special case for debian
2029   if (file_exists("/etc/debian_version")) {
2030     strncpy(buf, "Debian ", buflen);
2031     if (buflen > 7) {
2032       parse_os_info(&buf[7], buflen-7, "/etc/debian_version");
2033     }
2034   } else {
2035     strncpy(buf, "Linux", buflen);
2036   }
2037 }
2038 
2039 void os::Linux::print_libversion_info(outputStream* st) {
2040   // libc, pthread
2041   st->print("libc: ");
2042   st->print("%s ", os::Linux::libc_version());
2043   st->print("%s ", os::Linux::libpthread_version());
2044   st->cr();
2045 }
2046 
2047 void os::Linux::print_proc_sys_info(outputStream* st) {
2048   _print_ascii_file_h("/proc/sys/kernel/threads-max (system-wide limit on the number of threads)",
2049                       "/proc/sys/kernel/threads-max", st);
2050   _print_ascii_file_h("/proc/sys/vm/max_map_count (maximum number of memory map areas a process may have)",
2051                       "/proc/sys/vm/max_map_count", st);
2052   _print_ascii_file_h("/proc/sys/kernel/pid_max (system-wide limit on number of process identifiers)",
2053                       "/proc/sys/kernel/pid_max", st);
2054 }
2055 
2056 void os::Linux::print_system_memory_info(outputStream* st) {
2057   _print_ascii_file_h("/proc/meminfo", "/proc/meminfo", st, false);
2058   st->cr();
2059 
2060   // some information regarding THPs; for details see
2061   // https://www.kernel.org/doc/Documentation/vm/transhuge.txt
2062   _print_ascii_file_h("/sys/kernel/mm/transparent_hugepage/enabled",
2063                       "/sys/kernel/mm/transparent_hugepage/enabled", st);
2064   _print_ascii_file_h("/sys/kernel/mm/transparent_hugepage/defrag (defrag/compaction efforts parameter)",
2065                       "/sys/kernel/mm/transparent_hugepage/defrag", st);
2066 }
2067 
2068 bool os::Linux::query_process_memory_info(os::Linux::meminfo_t* info) {
2069   FILE* f = os::fopen("/proc/self/status", "r");
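       // Number of size_t-sized fields in meminfo_t; scanning of
       // /proc/self/status stops once all of them have been found.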
2070   const int num_values = sizeof(os::Linux::meminfo_t) / sizeof(size_t);
2071   int num_found = 0;
2072   char buf[256];
2073   info->vmsize = info->vmpeak = info->vmrss = info->vmhwm = info->vmswap =
2074       info->rssanon = info->rssfile = info->rssshmem = -1;
2075   if (f != NULL) {
2076     while (::fgets(buf, sizeof(buf), f) != NULL && num_found < num_values) {
2077       if ( (info->vmsize == -1    && sscanf(buf, "VmSize: " SSIZE_FORMAT " kB", &info->vmsize) == 1) ||
2078            (info->vmpeak == -1    && sscanf(buf, "VmPeak: " SSIZE_FORMAT " kB", &info->vmpeak) == 1) ||
2079            (info->vmswap == -1    && sscanf(buf, "VmSwap: " SSIZE_FORMAT " kB", &info->vmswap) == 1) ||
2080            (info->vmhwm == -1     && sscanf(buf, "VmHWM: " SSIZE_FORMAT " kB", &info->vmhwm) == 1) ||
2081            (info->vmrss == -1     && sscanf(buf, "VmRSS: " SSIZE_FORMAT " kB", &info->vmrss) == 1) ||
2082            (info->rssanon == -1   && sscanf(buf, "RssAnon: " SSIZE_FORMAT " kB", &info->rssanon) == 1) || // Needs Linux 4.5
2083            (info->rssfile == -1   && sscanf(buf, "RssFile: " SSIZE_FORMAT " kB", &info->rssfile) == 1) || // Needs Linux 4.5
2084            (info->rssshmem == -1  && sscanf(buf, "RssShmem: " SSIZE_FORMAT " kB", &info->rssshmem) == 1)  // Needs Linux 4.5
2085            )
2086       {
2087         num_found ++;
2088       }
2089     }
2090     fclose(f);
2091     return true;
2092   }
2093   return false;
2094 }
2095 
2096 #ifdef __GLIBC__
2097 // For Glibc, print a one-liner with the malloc tunables.
2098 // Most important and popular is MALLOC_ARENA_MAX, but we are
2099 // thorough and print them all.
2100 static void print_glibc_malloc_tunables(outputStream* st) {
2101   static const char* var[] = {
2102       // the new variant
2103       "GLIBC_TUNABLES",
2104       // legacy variants
2105       "MALLOC_CHECK_", "MALLOC_TOP_PAD_", "MALLOC_PERTURB_",
2106       "MALLOC_MMAP_THRESHOLD_", "MALLOC_TRIM_THRESHOLD_",
2107       "MALLOC_MMAP_MAX_", "MALLOC_ARENA_TEST", "MALLOC_ARENA_MAX",
2108       NULL};
2109   st->print("glibc malloc tunables: ");
2110   bool printed = false;
2111   for (int i = 0; var[i] != NULL; i ++) {
2112     const char* const val = ::getenv(var[i]);
2113     if (val != NULL) {
2114       st->print("%s%s=%s", (printed ? ", " : ""), var[i], val);
2115       printed = true;
2116     }
2117   }
2118   if (!printed) {
2119     st->print("(default)");
2120   }
2121 }
2122 #endif // __GLIBC__
2123 
2124 void os::Linux::print_process_memory_info(outputStream* st) {
2125 
2126   st->print_cr("Process Memory:");
2127 
2128   // Print virtual and resident set size; peak values; swap; and for
2129   //  rss its components if the kernel is recent enough.
2130   meminfo_t info;
2131   if (query_process_memory_info(&info)) {
2132     st->print_cr("Virtual Size: " SSIZE_FORMAT "K (peak: " SSIZE_FORMAT "K)", info.vmsize, info.vmpeak);
2133     st->print("Resident Set Size: " SSIZE_FORMAT "K (peak: " SSIZE_FORMAT "K)", info.vmrss, info.vmhwm);
2134     if (info.rssanon != -1) { // requires kernel >= 4.5
2135       st->print(" (anon: " SSIZE_FORMAT "K, file: " SSIZE_FORMAT "K, shmem: " SSIZE_FORMAT "K)",
2136                 info.rssanon, info.rssfile, info.rssshmem);
2137     }
2138     st->cr();
2139     if (info.vmswap != -1) { // requires kernel >= 2.6.34
2140       st->print_cr("Swapped out: " SSIZE_FORMAT "K", info.vmswap);
2141     }
2142   } else {
2143     st->print_cr("Could not open /proc/self/status to get process memory related information");
2144   }
2145 
2146   // glibc only:
2147   // - Print outstanding allocations using mallinfo
2148   // - Print glibc tunables
2149 #ifdef __GLIBC__
2150   size_t total_allocated = 0;
2151   bool might_have_wrapped = false;
2152   if (_mallinfo2 != NULL) {
2153     struct glibc_mallinfo2 mi = _mallinfo2();
2154     total_allocated = mi.uordblks;
2155   } else if (_mallinfo != NULL) {
2156     // mallinfo is an old API. Member names mean next to nothing and, beyond that, are 32-bit signed.
2157     // So for larger footprints the values may have wrapped around. We try to detect this here: if the
2158     // whole resident set size of the process is smaller than 4G, the malloc footprint has to be less
2159     // than that and the numbers are reliable.
2160     struct glibc_mallinfo mi = _mallinfo();
2161     total_allocated = (size_t)(unsigned)mi.uordblks;
2162     // Since mallinfo members are int, glibc values may have wrapped. Warn about this.
2163     might_have_wrapped = (info.vmrss * K) > UINT_MAX && (info.vmrss * K) > (total_allocated + UINT_MAX);
2164   }
2165   if (_mallinfo2 != NULL || _mallinfo != NULL) {
2166     st->print_cr("C-Heap outstanding allocations: " SIZE_FORMAT "K%s",
2167                  total_allocated / K,
2168                  might_have_wrapped ? " (may have wrapped)" : "");
2169   }
2170   // Tunables
2171   print_glibc_malloc_tunables(st);
2172   st->cr();
2173 #endif
2174 }
2175 
2176 bool os::Linux::print_ld_preload_file(outputStream* st) {
2177   return _print_ascii_file("/etc/ld.so.preload", st, "/etc/ld.so.preload:");
2178 }
2179 
2180 void os::Linux::print_uptime_info(outputStream* st) {
2181   struct sysinfo sinfo;
2182   int ret = sysinfo(&sinfo);
2183   if (ret == 0) {
2184     os::print_dhm(st, "OS uptime:", (long) sinfo.uptime);
2185   }
2186 }
2187 
2188 bool os::Linux::print_container_info(outputStream* st) {
2189   if (!OSContainer::is_containerized()) {
2190     st->print_cr("container information not found.");
2191     return false;
2192   }
2193 
2194   st->print_cr("container (cgroup) information:");
2195 
2196   const char *p_ct = OSContainer::container_type();
2197   st->print_cr("container_type: %s", p_ct != NULL ? p_ct : "not supported");
2198 
2199   char *p = OSContainer::cpu_cpuset_cpus();
2200   st->print_cr("cpu_cpuset_cpus: %s", p != NULL ? p : "not supported");
2201   free(p);
2202 
2203   p = OSContainer::cpu_cpuset_memory_nodes();
2204   st->print_cr("cpu_memory_nodes: %s", p != NULL ? p : "not supported");
2205   free(p);
2206 
2207   int i = OSContainer::active_processor_count();
2208   st->print("active_processor_count: ");
2209   if (i > 0) {
2210     if (ActiveProcessorCount > 0) {
2211       st->print_cr("%d, but overridden by -XX:ActiveProcessorCount %d", i, ActiveProcessorCount);
2212     } else {
2213       st->print_cr("%d", i);
2214     }
2215   } else {
2216     st->print_cr("not supported");
2217   }
2218 
2219   i = OSContainer::cpu_quota();
2220   st->print("cpu_quota: ");
2221   if (i > 0) {
2222     st->print_cr("%d", i);
2223   } else {
2224     st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no quota");
2225   }
2226 
2227   i = OSContainer::cpu_period();
2228   st->print("cpu_period: ");
2229   if (i > 0) {
2230     st->print_cr("%d", i);
2231   } else {
2232     st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no period");
2233   }
2234 
2235   i = OSContainer::cpu_shares();
2236   st->print("cpu_shares: ");
2237   if (i > 0) {
2238     st->print_cr("%d", i);
2239   } else {
2240     st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no shares");
2241   }
2242 
2243   jlong j = OSContainer::memory_limit_in_bytes();
2244   st->print("memory_limit_in_bytes: ");
2245   if (j > 0) {
2246     st->print_cr(JLONG_FORMAT, j);
2247   } else {
2248     st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "unlimited");
2249   }
2250 
2251   j = OSContainer::memory_and_swap_limit_in_bytes();
2252   st->print("memory_and_swap_limit_in_bytes: ");
2253   if (j > 0) {
2254     st->print_cr(JLONG_FORMAT, j);
2255   } else {
2256     st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "unlimited");
2257   }
2258 
2259   j = OSContainer::memory_soft_limit_in_bytes();
2260   st->print("memory_soft_limit_in_bytes: ");
2261   if (j > 0) {
2262     st->print_cr(JLONG_FORMAT, j);
2263   } else {
2264     st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "unlimited");
2265   }
2266 
2267   j = OSContainer::memory_usage_in_bytes();
2268   st->print("memory_usage_in_bytes: ");
2269   if (j > 0) {
2270     st->print_cr(JLONG_FORMAT, j);
2271   } else {
2272     st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "unlimited");
2273   }
2274 
2275   j = OSContainer::memory_max_usage_in_bytes();
2276   st->print("memory_max_usage_in_bytes: ");
2277   if (j > 0) {
2278     st->print_cr(JLONG_FORMAT, j);
2279   } else {
2280     st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "unlimited");
2281   }
2282 
2283   j = OSContainer::pids_max();
2284   st->print("maximum number of tasks: ");
2285   if (j > 0) {
2286     st->print_cr(JLONG_FORMAT, j);
2287   } else {
2288     st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "unlimited");
2289   }
2290 
2291   j = OSContainer::pids_current();
2292   st->print("current number of tasks: ");
2293   if (j > 0) {
2294     st->print_cr(JLONG_FORMAT, j);
2295   } else {
2296     if (j == OSCONTAINER_ERROR) {
2297       st->print_cr("not supported");
2298     }
2299   }
2300 
2301   return true;
2302 }
2303 
2304 void os::Linux::print_steal_info(outputStream* st) {
2305   if (has_initial_tick_info) {
2306     CPUPerfTicks pticks;
2307     bool res = os::Linux::get_tick_information(&pticks, -1);
2308 
2309     if (res && pticks.has_steal_ticks) {
2310       uint64_t steal_ticks_difference = pticks.steal - initial_steal_ticks;
2311       uint64_t total_ticks_difference = pticks.total - initial_total_ticks;
2312       double steal_ticks_perc = 0.0;
2313       if (total_ticks_difference != 0) {
2314         steal_ticks_perc = (double) steal_ticks_difference / total_ticks_difference;
2315       }
2316       st->print_cr("Steal ticks since vm start: " UINT64_FORMAT, steal_ticks_difference);
2317       st->print_cr("Steal ticks percentage since vm start:%7.3f", steal_ticks_perc);
2318     }
2319   }
2320 }
2321 
2322 void os::print_memory_info(outputStream* st) {
2323 
2324   st->print("Memory:");
2325   st->print(" %dk page", os::vm_page_size()>>10);
2326 
2327   // values in struct sysinfo are "unsigned long"
2328   struct sysinfo si;
2329   sysinfo(&si);
2330 
2331   st->print(", physical " UINT64_FORMAT "k",
2332             os::physical_memory() >> 10);
2333   st->print("(" UINT64_FORMAT "k free)",
2334             os::available_memory() >> 10);
2335   st->print(", swap " UINT64_FORMAT "k",
2336             ((jlong)si.totalswap * si.mem_unit) >> 10);
2337   st->print("(" UINT64_FORMAT "k free)",
2338             ((jlong)si.freeswap * si.mem_unit) >> 10);
2339   st->cr();
2340   st->print("Page Sizes: ");
2341   _page_sizes.print_on(st);
2342   st->cr();
2343 }
2344 
2345 // Print the first "model name" line and the first "flags" line
2346 // that we find and nothing more. We assume "model name" comes
2347 // before "flags" so if we find a second "model name", then the
2348 // "flags" field is considered missing.
2349 static bool print_model_name_and_flags(outputStream* st, char* buf, size_t buflen) {
2350 #if defined(IA32) || defined(AMD64)
2351   // Other platforms have less repetitive cpuinfo files
2352   FILE *fp = os::fopen("/proc/cpuinfo", "r");
2353   if (fp) {
2354     bool model_name_printed = false;
2355     while (!feof(fp)) {
2356       if (fgets(buf, buflen, fp)) {
2357         // Assume model name comes before flags
2358         if (strstr(buf, "model name") != NULL) {
2359           if (!model_name_printed) {
2360             st->print_raw("CPU Model and flags from /proc/cpuinfo:\n");
2361             st->print_raw(buf);
2362             model_name_printed = true;
2363           } else {
2364             // model name printed but not flags?  Odd, just return
2365             fclose(fp);
2366             return true;
2367           }
2368         }
2369         // print the flags line too
2370         if (strstr(buf, "flags") != NULL) {
2371           st->print_raw(buf);
2372           fclose(fp);
2373           return true;
2374         }
2375       }
2376     }
2377     fclose(fp);
2378   }
2379 #endif // x86 platforms
2380   return false;
2381 }
2382 
2383 // additional information about CPU e.g. available frequency ranges
2384 static void print_sys_devices_cpu_info(outputStream* st, char* buf, size_t buflen) {
2385   _print_ascii_file_h("Online cpus", "/sys/devices/system/cpu/online", st);
2386   _print_ascii_file_h("Offline cpus", "/sys/devices/system/cpu/offline", st);
2387 
2388   if (ExtensiveErrorReports) {
2389     // cache related info (cpu 0, should be similar for other CPUs)
2390     for (unsigned int i=0; i < 10; i++) { // handle max. 10 cache entries
2391       char hbuf_level[60];
2392       char hbuf_type[60];
2393       char hbuf_size[60];
2394       char hbuf_coherency_line_size[80];
2395       snprintf(hbuf_level, 60, "/sys/devices/system/cpu/cpu0/cache/index%u/level", i);
2396       snprintf(hbuf_type, 60, "/sys/devices/system/cpu/cpu0/cache/index%u/type", i);
2397       snprintf(hbuf_size, 60, "/sys/devices/system/cpu/cpu0/cache/index%u/size", i);
2398       snprintf(hbuf_coherency_line_size, 80, "/sys/devices/system/cpu/cpu0/cache/index%u/coherency_line_size", i);
2399       if (os::file_exists(hbuf_level)) {
2400         _print_ascii_file_h("cache level", hbuf_level, st);
2401         _print_ascii_file_h("cache type", hbuf_type, st);
2402         _print_ascii_file_h("cache size", hbuf_size, st);
2403         _print_ascii_file_h("cache coherency line size", hbuf_coherency_line_size, st);
2404       }
2405     }
2406   }
2407 
2408   // we miss the cpufreq entries on Power and s390x
2409 #if defined(IA32) || defined(AMD64)
2410   _print_ascii_file_h("BIOS frequency limitation", "/sys/devices/system/cpu/cpu0/cpufreq/bios_limit", st);
2411   _print_ascii_file_h("Frequency switch latency (ns)", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_transition_latency", st);
2412   _print_ascii_file_h("Available cpu frequencies", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies", st);
2413   // min and max should be in the Available range but still print them (not all info might be available for all kernels)
2414   if (ExtensiveErrorReports) {
2415     _print_ascii_file_h("Maximum cpu frequency", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", st);
2416     _print_ascii_file_h("Minimum cpu frequency", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_min_freq", st);
2417     _print_ascii_file_h("Current cpu frequency", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq", st);
2418   }
2419   // governors are power schemes, see https://wiki.archlinux.org/index.php/CPU_frequency_scaling
2420   if (ExtensiveErrorReports) {
2421     _print_ascii_file_h("Available governors", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors", st);
2422   }
2423   _print_ascii_file_h("Current governor", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", st);
2424   // Core performance boost, see https://www.kernel.org/doc/Documentation/cpu-freq/boost.txt
2425   // Raise operating frequency of some cores in a multi-core package if certain conditions apply, e.g.
2426   // whole chip is not fully utilized
2427   _print_ascii_file_h("Core performance/turbo boost", "/sys/devices/system/cpu/cpufreq/boost", st);
2428 #endif
2429 }
2430 
2431 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
2432   // Only print the model name if the platform provides this as a summary
2433   if (!print_model_name_and_flags(st, buf, buflen)) {
2434     _print_ascii_file_h("/proc/cpuinfo", "/proc/cpuinfo", st, false);
2435   }
2436   st->cr();
2437   print_sys_devices_cpu_info(st, buf, buflen);
2438 }
2439 
2440 #if defined(AMD64) || defined(IA32) || defined(X32)
2441 const char* search_string = "model name";
2442 #elif defined(M68K)
2443 const char* search_string = "CPU";
2444 #elif defined(PPC64)
2445 const char* search_string = "cpu";
2446 #elif defined(S390)
2447 const char* search_string = "machine =";
2448 #elif defined(SPARC)
2449 const char* search_string = "cpu";
2450 #else
2451 const char* search_string = "Processor";
2452 #endif
2453 
2454 // Parses the cpuinfo file for string representing the model name.
2455 void os::get_summary_cpu_info(char* cpuinfo, size_t length) {
2456   FILE* fp = os::fopen("/proc/cpuinfo", "r");
2457   if (fp != NULL) {
2458     while (!feof(fp)) {
2459       char buf[256];
2460       if (fgets(buf, sizeof(buf), fp)) {
2461         char* start = strstr(buf, search_string);
2462         if (start != NULL) {
2463           char *ptr = start + strlen(search_string);
2464           char *end = buf + strlen(buf);
2465           while (ptr != end) {
2466              // skip the whitespace and colon that precede the name.
2467              if (*ptr != ' ' && *ptr != '\t' && *ptr != ':') {
2468                break;
2469              }
2470              ptr++;
2471           }
2472           if (ptr != end) {
2473             // reasonable string, get rid of newline and keep the rest
2474             char* nl = strchr(buf, '\n');
2475             if (nl != NULL) *nl = '\0';
2476             strncpy(cpuinfo, ptr, length);
2477             fclose(fp);
2478             return;
2479           }
2480         }
2481       }
2482     }
2483     fclose(fp);
2484   }
2485   // cpuinfo not found or parsing failed; just return a generic string.  The entire
2486   // /proc/cpuinfo file will be printed later in the output (or enough of it for x86)
2487 #if   defined(AARCH64)
2488   strncpy(cpuinfo, "AArch64", length);
2489 #elif defined(AMD64)
2490   strncpy(cpuinfo, "x86_64", length);
2491 #elif defined(ARM)  // Order wrt. AARCH64 is relevant!
2492   strncpy(cpuinfo, "ARM", length);
2493 #elif defined(IA32)
2494   strncpy(cpuinfo, "x86_32", length);
2495 #elif defined(IA64)
2496   strncpy(cpuinfo, "IA64", length);
2497 #elif defined(PPC)
2498   strncpy(cpuinfo, "PPC64", length);
2499 #elif defined(S390)
2500   strncpy(cpuinfo, "S390", length);
2501 #elif defined(SPARC)
2502   strncpy(cpuinfo, "sparcv9", length);
2503 #elif defined(ZERO_LIBARCH)
2504   strncpy(cpuinfo, ZERO_LIBARCH, length);
2505 #else
2506   strncpy(cpuinfo, "unknown", length);
2507 #endif
2508 }
2509 
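     // Cached result of os::jvm_path(); resolved lazily on the first call.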
2510 static char saved_jvm_path[MAXPATHLEN] = {0};
2511 
2512 // Find the full path to the current module, libjvm.so
2513 void os::jvm_path(char *buf, jint buflen) {
2514   // Error checking.
2515   if (buflen < MAXPATHLEN) {
2516     assert(false, "must use a large-enough buffer");
2517     buf[0] = '\0';
2518     return;
2519   }
2520   // Lazy resolve the path to current module.
2521   if (saved_jvm_path[0] != 0) {
2522     strcpy(buf, saved_jvm_path);
2523     return;
2524   }
2525 
2526   char dli_fname[MAXPATHLEN];
2527   dli_fname[0] = '\0';
2528   bool ret = dll_address_to_library_name(
2529                                          CAST_FROM_FN_PTR(address, os::jvm_path),
2530                                          dli_fname, sizeof(dli_fname), NULL);
2531   assert(ret, "cannot locate libjvm");
2532   char *rp = NULL;
2533   if (ret && dli_fname[0] != '\0') {
2534     rp = os::Posix::realpath(dli_fname, buf, buflen);
2535   }
2536   if (rp == NULL) {
2537     return;
2538   }
2539 
2540   if (Arguments::sun_java_launcher_is_altjvm()) {
2541     // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
2542     // value for buf is "<JAVA_HOME>/jre/lib/<vmtype>/libjvm.so".
2543     // If "/jre/lib/" appears at the right place in the string, then
2544     // assume we are installed in a JDK and we're done. Otherwise, check
2545     // for a JAVA_HOME environment variable and fix up the path so it
2546     // looks like libjvm.so is installed there (append a fake suffix
2547     // hotspot/libjvm.so).
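         // Walk p back over up to five trailing path components so the check
         // below can see whether "/jre/lib/" appears at the expected position.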
2548     const char *p = buf + strlen(buf) - 1;
2549     for (int count = 0; p > buf && count < 5; ++count) {
2550       for (--p; p > buf && *p != '/'; --p)
2551         /* empty */ ;
2552     }
2553 
2554     if (strncmp(p, "/jre/lib/", 9) != 0) {
2555       // Look for JAVA_HOME in the environment.
2556       char* java_home_var = ::getenv("JAVA_HOME");
2557       if (java_home_var != NULL && java_home_var[0] != 0) {
2558         char* jrelib_p;
2559         int len;
2560 
2561         // Check the current module name "libjvm.so".
2562         p = strrchr(buf, '/');
2563         if (p == NULL) {
2564           return;
2565         }
2566         assert(strstr(p, "/libjvm") == p, "invalid library name");
2567 
2568         rp = os::Posix::realpath(java_home_var, buf, buflen);
2569         if (rp == NULL) {
2570           return;
2571         }
2572 
2573         // determine if this is a legacy image or modules image
2574         // modules image doesn't have "jre" subdirectory
2575         len = strlen(buf);
2576         assert(len < buflen, "Ran out of buffer room");
2577         jrelib_p = buf + len;
2578         snprintf(jrelib_p, buflen-len, "/jre/lib");
2579         if (0 != access(buf, F_OK)) {
2580           snprintf(jrelib_p, buflen-len, "/lib");
2581         }
2582 
2583         if (0 == access(buf, F_OK)) {
2584           // Use current module name "libjvm.so"
2585           len = strlen(buf);
2586           snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
2587         } else {
2588           // Go back to path of .so
2589           rp = os::Posix::realpath(dli_fname, buf, buflen);
2590           if (rp == NULL) {
2591             return;
2592           }
2593         }
2594       }
2595     }
2596   }
2597 
2598   strncpy(saved_jvm_path, buf, MAXPATHLEN);
2599   saved_jvm_path[MAXPATHLEN - 1] = '\0';
2600 }
2601 
2602 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2603   // no prefix required, not even "_"
2604 }
2605 
2606 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2607   // no suffix required
2608 }
2609 
2610 ////////////////////////////////////////////////////////////////////////////////
2611 // Virtual Memory
2612 
2613 int os::vm_page_size() {
2614   // Seems redundant as all get out
2615   assert(os::Linux::page_size() != -1, "must call os::init");
2616   return os::Linux::page_size();
2617 }
2618 
2619 // Linux allocates memory by pages.
2620 int os::vm_allocation_granularity() {
2621   assert(os::Linux::page_size() != -1, "must call os::init");
2622   return os::Linux::page_size();
2623 }
2624 
2625 // Rationale behind this function:
2626 //  current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without an executable
2627 //  mapping for the address (see lookup_dcookie() in the kernel module), thus we cannot get
2628 //  samples for JITted code. Here we create a private executable mapping over the code cache
2629 //  and then we can use the standard (well, almost, as the mapping can change) way to provide
2630 //  info for the reporting script by storing the timestamp and location of the symbol.
2631 void linux_wrap_code(char* base, size_t size) {
2632   static volatile jint cnt = 0;
2633 
2634   if (!UseOprofile) {
2635     return;
2636   }
2637 
2638   char buf[PATH_MAX+1];
2639   int num = Atomic::add(&cnt, 1);
2640 
2641   snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
2642            os::get_temp_directory(), os::current_process_id(), num);
2643   unlink(buf);
2644 
2645   int fd = ::open(buf, O_CREAT | O_RDWR, S_IRWXU);
2646 
2647   if (fd != -1) {
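         // Grow the file to (almost) 'size' bytes by seeking near the end and
         // writing a single byte, then map it over the code cache region so the
         // profiler sees an executable, file-backed mapping.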
2648     off_t rv = ::lseek(fd, size-2, SEEK_SET);
2649     if (rv != (off_t)-1) {
2650       if (::write(fd, "", 1) == 1) {
2651         mmap(base, size,
2652              PROT_READ|PROT_WRITE|PROT_EXEC,
2653              MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);
2654       }
2655     }
2656     ::close(fd);
2657     unlink(buf);
2658   }
2659 }
2660 
2661 static bool recoverable_mmap_error(int err) {
2662   // See if the error is one we can let the caller handle. This
2663   // list of errno values comes from JBS-6843484. I can't find a
2664   // Linux man page that documents this specific set of errno
2665   // values so while this list currently matches Solaris, it may
2666   // change as we gain experience with this failure mode.
2667   switch (err) {
2668   case EBADF:
2669   case EINVAL:
2670   case ENOTSUP:
2671     // let the caller deal with these errors
2672     return true;
2673 
2674   default:
2675     // Any remaining errors on this OS can cause our reserved mapping
2676     // to be lost. That can cause confusion where different data
2677     // structures think they have the same memory mapped. The worst
2678     // scenario is if both the VM and a library think they have the
2679     // same memory mapped.
2680     return false;
2681   }
2682 }
2683 
2684 static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
2685                                     int err) {
2686   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2687           ", %d) failed; error='%s' (errno=%d)", p2i(addr), size, exec,
2688           os::strerror(err), err);
2689 }
2690 
2691 static void warn_fail_commit_memory(char* addr, size_t size,
2692                                     size_t alignment_hint, bool exec,
2693                                     int err) {
2694   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2695           ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", p2i(addr), size,
2696           alignment_hint, exec, os::strerror(err), err);
2697 }
2698 
2699 // NOTE: The Linux kernel does not really reserve the pages for us.
2700 //       All it does is check whether there are enough free pages
2701 //       left at the time of mmap(). This is a potential
2702 //       problem.
2703 int os::Linux::commit_memory_impl(char* addr, size_t size, bool exec) {
2704   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2705   uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
2706                                      MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
2707   if (res != (uintptr_t) MAP_FAILED) {
2708     if (UseNUMAInterleaving) {
2709       numa_make_global(addr, size);
2710     }
2711     return 0;
2712   }
2713 
2714   int err = errno;  // save errno from mmap() call above
2715 
2716   if (!recoverable_mmap_error(err)) {
2717     warn_fail_commit_memory(addr, size, exec, err);
2718     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "committing reserved memory.");
2719   }
2720 
2721   return err;
2722 }
2723 
2724 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2725   return os::Linux::commit_memory_impl(addr, size, exec) == 0;
2726 }
2727 
2728 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2729                                   const char* mesg) {
2730   assert(mesg != NULL, "mesg must be specified");
2731   int err = os::Linux::commit_memory_impl(addr, size, exec);
2732   if (err != 0) {
2733     // the caller wants all commit errors to exit with the specified mesg:
2734     warn_fail_commit_memory(addr, size, exec, err);
2735     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
2736   }
2737 }
2738 
2739 // Define MAP_HUGETLB here so we can build HotSpot on old systems.
2740 #ifndef MAP_HUGETLB
2741   #define MAP_HUGETLB 0x40000
2742 #endif
2743 
2744 // If mmap flags are set with MAP_HUGETLB and the system supports multiple
2745 // huge page sizes, flag bits [26:31] can be used to encode the log2 of the
2746 // desired huge page size. Otherwise, the system's default huge page size will be used.
2747 // See mmap(2) man page for more info (since Linux 3.8).
2748 // https://lwn.net/Articles/533499/
2749 #ifndef MAP_HUGE_SHIFT
2750   #define MAP_HUGE_SHIFT 26
2751 #endif
2752 
2753 // Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
2754 #ifndef MADV_HUGEPAGE
2755   #define MADV_HUGEPAGE 14
2756 #endif
2757 
2758 int os::Linux::commit_memory_impl(char* addr, size_t size,
2759                                   size_t alignment_hint, bool exec) {
2760   int err = os::Linux::commit_memory_impl(addr, size, exec);
2761   if (err == 0) {
2762     realign_memory(addr, size, alignment_hint);
2763   }
2764   return err;
2765 }
2766 
2767 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
2768                           bool exec) {
2769   return os::Linux::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
2770 }
2771 
2772 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2773                                   size_t alignment_hint, bool exec,
2774                                   const char* mesg) {
2775   assert(mesg != NULL, "mesg must be specified");
2776   int err = os::Linux::commit_memory_impl(addr, size, alignment_hint, exec);
2777   if (err != 0) {
2778     // the caller wants all commit errors to exit with the specified mesg:
2779     warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
2780     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
2781   }
2782 }
2783 
2784 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2785   if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) {
2786     // We don't check the return value: madvise(MADV_HUGEPAGE) may not
2787     // be supported or the memory may already be backed by huge pages.
2788     ::madvise(addr, bytes, MADV_HUGEPAGE);
2789   }
2790 }
2791 
2792 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
  // This method works by doing an mmap over an existing mapping and effectively discarding
  // the existing pages. However, it won't work for SHM-based large pages that cannot be
2795   // uncommitted at all. We don't do anything in this case to avoid creating a segment with
2796   // small pages on top of the SHM segment. This method always works for small pages, so we
2797   // allow that in any case.
2798   if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) {
2799     commit_memory(addr, bytes, alignment_hint, !ExecMem);
2800   }
2801 }
2802 
2803 void os::numa_make_global(char *addr, size_t bytes) {
2804   Linux::numa_interleave_memory(addr, bytes);
2805 }
2806 
// Argument for numa_set_bind_policy(int). Passing 0 will set the
// bind policy to MPOL_PREFERRED for the current thread.
2809 #define USE_MPOL_PREFERRED 0
2810 
2811 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
  // To make NUMA and large pages more robust when both are enabled, we need to ease
2813   // the requirements on where the memory should be allocated. MPOL_BIND is the
2814   // default policy and it will force memory to be allocated on the specified
2815   // node. Changing this to MPOL_PREFERRED will prefer to allocate the memory on
2816   // the specified node, but will not force it. Using this policy will prevent
2817   // getting SIGBUS when trying to allocate large pages on NUMA nodes with no
2818   // free large pages.
2819   Linux::numa_set_bind_policy(USE_MPOL_PREFERRED);
2820   Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
2821 }
2822 
2823 bool os::numa_topology_changed() { return false; }
2824 
2825 size_t os::numa_get_groups_num() {
2826   // Return just the number of nodes in which it's possible to allocate memory
2827   // (in numa terminology, configured nodes).
2828   return Linux::numa_num_configured_nodes();
2829 }
2830 
2831 int os::numa_get_group_id() {
2832   int cpu_id = Linux::sched_getcpu();
2833   if (cpu_id != -1) {
2834     int lgrp_id = Linux::get_node_by_cpu(cpu_id);
2835     if (lgrp_id != -1) {
2836       return lgrp_id;
2837     }
2838   }
2839   return 0;
2840 }
2841 
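// Query the NUMA node backing the page that contains 'address'. Calling
// numa_move_pages() with a NULL target node list does not move anything;
// it only fills the status array with the node id of each page (or a
// negative errno value if the page has not been allocated yet).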
2842 int os::numa_get_group_id_for_address(const void* address) {
2843   void** pages = const_cast<void**>(&address);
2844   int id = -1;
2845 
2846   if (os::Linux::numa_move_pages(0, 1, pages, NULL, &id, 0) == -1) {
2847     return -1;
2848   }
2849   if (id < 0) {
2850     return -1;
2851   }
2852   return id;
2853 }
2854 
2855 int os::Linux::get_existing_num_nodes() {
2856   int node;
2857   int highest_node_number = Linux::numa_max_node();
2858   int num_nodes = 0;
2859 
2860   // Get the total number of nodes in the system including nodes without memory.
2861   for (node = 0; node <= highest_node_number; node++) {
2862     if (is_node_in_existing_nodes(node)) {
2863       num_nodes++;
2864     }
2865   }
2866   return num_nodes;
2867 }
2868 
2869 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2870   int highest_node_number = Linux::numa_max_node();
2871   size_t i = 0;
2872 
  // Collect the ids of all nodes in which it is possible to allocate memory.
  // Note that nodes are not always numbered consecutively from 0 to the highest
  // node number. If the nodes have been bound explicitly using numactl membind,
  // then allocate memory from those nodes only.
2877   for (int node = 0; node <= highest_node_number; node++) {
2878     if (Linux::is_node_in_bound_nodes((unsigned int)node)) {
2879       ids[i++] = node;
2880     }
2881   }
2882   return i;
2883 }
2884 
2885 bool os::get_page_info(char *start, page_info* info) {
2886   return false;
2887 }
2888 
2889 char *os::scan_pages(char *start, char* end, page_info* page_expected,
2890                      page_info* page_found) {
2891   return end;
2892 }
2893 
2894 
2895 int os::Linux::sched_getcpu_syscall(void) {
2896   unsigned int cpu = 0;
2897   int retval = -1;
2898 
2899 #if defined(IA32)
2900   #ifndef SYS_getcpu
2901     #define SYS_getcpu 318
2902   #endif
2903   retval = syscall(SYS_getcpu, &cpu, NULL, NULL);
2904 #elif defined(AMD64)
// Unfortunately we have to bring all these macros here from vsyscall.h
// to be able to compile on old Linux systems.
2907   #define __NR_vgetcpu 2
2908   #define VSYSCALL_START (-10UL << 20)
2909   #define VSYSCALL_SIZE 1024
2910   #define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
2911   typedef long (*vgetcpu_t)(unsigned int *cpu, unsigned int *node, unsigned long *tcache);
2912   vgetcpu_t vgetcpu = (vgetcpu_t)VSYSCALL_ADDR(__NR_vgetcpu);
2913   retval = vgetcpu(&cpu, NULL, NULL);
2914 #endif
2915 
2916   return (retval == -1) ? retval : cpu;
2917 }
2918 
2919 void os::Linux::sched_getcpu_init() {
2920   // sched_getcpu() should be in libc.
2921   set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
2922                                   dlsym(RTLD_DEFAULT, "sched_getcpu")));
2923 
2924   // If it's not, try a direct syscall.
2925   if (sched_getcpu() == -1) {
2926     set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
2927                                     (void*)&sched_getcpu_syscall));
2928   }
2929 
2930   if (sched_getcpu() == -1) {
2931     vm_exit_during_initialization("getcpu(2) system call not supported by kernel");
2932   }
2933 }
2934 
// libnuma declares numa_warn() and numa_error() as hooks an application may
// override. Define them as empty functions so that warnings and errors from
// the NUMA-aware allocator are silently ignored.
2936 extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { }
2937 extern "C" JNIEXPORT void numa_error(char *where) { }
2938 
// Handle request to load libnuma symbol version 1.1 (API v1). If it fails,
// load the symbol from the base version instead.
2941 void* os::Linux::libnuma_dlsym(void* handle, const char *name) {
2942   void *f = dlvsym(handle, name, "libnuma_1.1");
2943   if (f == NULL) {
2944     f = dlsym(handle, name);
2945   }
2946   return f;
2947 }
2948 
2949 // Handle request to load libnuma symbol version 1.2 (API v2) only.
2950 // Return NULL if the symbol is not defined in this particular version.
2951 void* os::Linux::libnuma_v2_dlsym(void* handle, const char* name) {
2952   return dlvsym(handle, name, "libnuma_1.2");
2953 }
2954 
2955 // Check numa dependent syscalls
2956 static bool numa_syscall_check() {
2957   // NUMA APIs depend on several syscalls. E.g., get_mempolicy is required for numa_get_membind and
2958   // numa_get_interleave_mask. But these dependent syscalls can be unsupported for various reasons.
  // Especially in Docker containers, get_mempolicy is not allowed with the default configuration. So it's necessary
2960   // to check whether the syscalls are available. Currently, only get_mempolicy is checked since checking
2961   // others like mbind would cause unexpected side effects.
2962 #ifdef SYS_get_mempolicy
2963   int dummy = 0;
2964   if (syscall(SYS_get_mempolicy, &dummy, NULL, 0, (void*)&dummy, 3) == -1) {
2965     return false;
2966   }
2967 #endif
2968 
2969   return true;
2970 }
2971 
2972 bool os::Linux::libnuma_init() {
2973   // Requires sched_getcpu() and numa dependent syscalls support
2974   if ((sched_getcpu() != -1) && numa_syscall_check()) {
2975     void *handle = dlopen("libnuma.so.1", RTLD_LAZY);
2976     if (handle != NULL) {
2977       set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t,
2978                                            libnuma_dlsym(handle, "numa_node_to_cpus")));
2979       set_numa_node_to_cpus_v2(CAST_TO_FN_PTR(numa_node_to_cpus_v2_func_t,
2980                                               libnuma_v2_dlsym(handle, "numa_node_to_cpus")));
2981       set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t,
2982                                        libnuma_dlsym(handle, "numa_max_node")));
2983       set_numa_num_configured_nodes(CAST_TO_FN_PTR(numa_num_configured_nodes_func_t,
2984                                                    libnuma_dlsym(handle, "numa_num_configured_nodes")));
2985       set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,
2986                                         libnuma_dlsym(handle, "numa_available")));
2987       set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
2988                                             libnuma_dlsym(handle, "numa_tonode_memory")));
2989       set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
2990                                                 libnuma_dlsym(handle, "numa_interleave_memory")));
2991       set_numa_interleave_memory_v2(CAST_TO_FN_PTR(numa_interleave_memory_v2_func_t,
2992                                                 libnuma_v2_dlsym(handle, "numa_interleave_memory")));
2993       set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
2994                                               libnuma_dlsym(handle, "numa_set_bind_policy")));
2995       set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t,
2996                                                libnuma_dlsym(handle, "numa_bitmask_isbitset")));
2997       set_numa_distance(CAST_TO_FN_PTR(numa_distance_func_t,
2998                                        libnuma_dlsym(handle, "numa_distance")));
2999       set_numa_get_membind(CAST_TO_FN_PTR(numa_get_membind_func_t,
3000                                           libnuma_v2_dlsym(handle, "numa_get_membind")));
3001       set_numa_get_interleave_mask(CAST_TO_FN_PTR(numa_get_interleave_mask_func_t,
3002                                                   libnuma_v2_dlsym(handle, "numa_get_interleave_mask")));
3003       set_numa_move_pages(CAST_TO_FN_PTR(numa_move_pages_func_t,
3004                                          libnuma_dlsym(handle, "numa_move_pages")));
3005       set_numa_set_preferred(CAST_TO_FN_PTR(numa_set_preferred_func_t,
3006                                             libnuma_dlsym(handle, "numa_set_preferred")));
3007 
3008       if (numa_available() != -1) {
3009         set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
3010         set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr"));
3011         set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr"));
3012         set_numa_interleave_bitmask(_numa_get_interleave_mask());
3013         set_numa_membind_bitmask(_numa_get_membind());
3014         // Create an index -> node mapping, since nodes are not always consecutive
3015         _nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, mtInternal);
3016         rebuild_nindex_to_node_map();
3017         // Create a cpu -> node mapping
3018         _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, mtInternal);
3019         rebuild_cpu_to_node_map();
3020         return true;
3021       }
3022     }
3023   }
3024   return false;
3025 }
3026 
3027 size_t os::Linux::default_guard_size(os::ThreadType thr_type) {
  // Creating a guard page is very expensive. Java threads have HotSpot
  // guard pages, so only enable the glibc guard page for non-Java threads.
3030   // (Remember: compiler thread is a Java thread, too!)
3031   return ((thr_type == java_thread || thr_type == compiler_thread) ? 0 : page_size());
3032 }
3033 
3034 void os::Linux::rebuild_nindex_to_node_map() {
3035   int highest_node_number = Linux::numa_max_node();
3036 
3037   nindex_to_node()->clear();
3038   for (int node = 0; node <= highest_node_number; node++) {
3039     if (Linux::is_node_in_existing_nodes(node)) {
3040       nindex_to_node()->append(node);
3041     }
3042   }
3043 }
3044 
// rebuild_cpu_to_node_map() constructs a table mapping cpu id to node id.
3046 // The table is later used in get_node_by_cpu().
3047 void os::Linux::rebuild_cpu_to_node_map() {
  const size_t NCPUS = 32768; // Since the buffer size computation is very obscure
                              // in libnuma (possible values start at 16 and continue
                              // up with every other power of 2, but stay below the
                              // maximum number of CPUs supported by the kernel), and
                              // is subject to change (in libnuma version 2 the
                              // requirements are more reasonable), we just hardcode
                              // the number the library uses.
3055   const size_t BitsPerCLong = sizeof(long) * CHAR_BIT;
3056 
3057   size_t cpu_num = processor_count();
3058   size_t cpu_map_size = NCPUS / BitsPerCLong;
3059   size_t cpu_map_valid_size =
3060     MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size);
3061 
3062   cpu_to_node()->clear();
3063   cpu_to_node()->at_grow(cpu_num - 1);
3064 
3065   size_t node_num = get_existing_num_nodes();
3066 
3067   int distance = 0;
3068   int closest_distance = INT_MAX;
3069   int closest_node = 0;
3070   unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size, mtInternal);
3071   for (size_t i = 0; i < node_num; i++) {
3072     // Check if node is configured (not a memory-less node). If it is not, find
3073     // the closest configured node. Check also if node is bound, i.e. it's allowed
3074     // to allocate memory from the node. If it's not allowed, map cpus in that node
3075     // to the closest node from which memory allocation is allowed.
3076     if (!is_node_in_configured_nodes(nindex_to_node()->at(i)) ||
3077         !is_node_in_bound_nodes(nindex_to_node()->at(i))) {
3078       closest_distance = INT_MAX;
3079       // Check distance from all remaining nodes in the system. Ignore distance
3080       // from itself, from another non-configured node, and from another non-bound
3081       // node.
3082       for (size_t m = 0; m < node_num; m++) {
3083         if (m != i &&
3084             is_node_in_configured_nodes(nindex_to_node()->at(m)) &&
3085             is_node_in_bound_nodes(nindex_to_node()->at(m))) {
3086           distance = numa_distance(nindex_to_node()->at(i), nindex_to_node()->at(m));
3087           // If a closest node is found, update. There is always at least one
3088           // configured and bound node in the system so there is always at least
3089           // one node close.
3090           if (distance != 0 && distance < closest_distance) {
3091             closest_distance = distance;
3092             closest_node = nindex_to_node()->at(m);
3093           }
3094         }
3095       }
    } else {
      // Current node is already a configured node.
      closest_node = nindex_to_node()->at(i);
    }
3100 
3101     // Get cpus from the original node and map them to the closest node. If node
3102     // is a configured node (not a memory-less node), then original node and
3103     // closest node are the same.
3104     if (numa_node_to_cpus(nindex_to_node()->at(i), cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) {
3105       for (size_t j = 0; j < cpu_map_valid_size; j++) {
3106         if (cpu_map[j] != 0) {
3107           for (size_t k = 0; k < BitsPerCLong; k++) {
3108             if (cpu_map[j] & (1UL << k)) {
3109               int cpu_index = j * BitsPerCLong + k;
3110 
3111 #ifndef PRODUCT
3112               if (UseDebuggerErgo1 && cpu_index >= (int)cpu_num) {
3113                 // Some debuggers limit the processor count without
3114                 // intercepting the NUMA APIs. Just fake the values.
3115                 cpu_index = 0;
3116               }
3117 #endif
3118 
3119               cpu_to_node()->at_put(cpu_index, closest_node);
3120             }
3121           }
3122         }
3123       }
3124     }
3125   }
3126   FREE_C_HEAP_ARRAY(unsigned long, cpu_map);
3127 }
3128 
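// Wrapper that prefers the libnuma v2 numa_node_to_cpus() when it was
// resolved. The v2 variant takes a struct bitmask whose size field is
// counted in bits (hence bufferlen * 8), while the v1 variant takes the
// raw buffer length in bytes.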
3129 int os::Linux::numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
3130   // use the latest version of numa_node_to_cpus if available
3131   if (_numa_node_to_cpus_v2 != NULL) {
3132 
3133     // libnuma bitmask struct
3134     struct bitmask {
3135       unsigned long size; /* number of bits in the map */
3136       unsigned long *maskp;
3137     };
3138 
3139     struct bitmask mask;
3140     mask.maskp = (unsigned long *)buffer;
3141     mask.size = bufferlen * 8;
3142     return _numa_node_to_cpus_v2(node, &mask);
3143   } else if (_numa_node_to_cpus != NULL) {
3144     return _numa_node_to_cpus(node, buffer, bufferlen);
3145   }
3146   return -1;
3147 }
3148 
3149 int os::Linux::get_node_by_cpu(int cpu_id) {
3150   if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) {
3151     return cpu_to_node()->at(cpu_id);
3152   }
3153   return -1;
3154 }
3155 
3156 GrowableArray<int>* os::Linux::_cpu_to_node;
3157 GrowableArray<int>* os::Linux::_nindex_to_node;
3158 os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu;
3159 os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus;
3160 os::Linux::numa_node_to_cpus_v2_func_t os::Linux::_numa_node_to_cpus_v2;
3161 os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
3162 os::Linux::numa_num_configured_nodes_func_t os::Linux::_numa_num_configured_nodes;
3163 os::Linux::numa_available_func_t os::Linux::_numa_available;
3164 os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
3165 os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
3166 os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2;
3167 os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
3168 os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
3169 os::Linux::numa_distance_func_t os::Linux::_numa_distance;
3170 os::Linux::numa_get_membind_func_t os::Linux::_numa_get_membind;
3171 os::Linux::numa_get_interleave_mask_func_t os::Linux::_numa_get_interleave_mask;
3172 os::Linux::numa_move_pages_func_t os::Linux::_numa_move_pages;
3173 os::Linux::numa_set_preferred_func_t os::Linux::_numa_set_preferred;
3174 os::Linux::NumaAllocationPolicy os::Linux::_current_numa_policy;
3175 unsigned long* os::Linux::_numa_all_nodes;
3176 struct bitmask* os::Linux::_numa_all_nodes_ptr;
3177 struct bitmask* os::Linux::_numa_nodes_ptr;
3178 struct bitmask* os::Linux::_numa_interleave_bitmask;
3179 struct bitmask* os::Linux::_numa_membind_bitmask;
3180 
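// Uncommitting re-mmap()s the range with MAP_FIXED, PROT_NONE and
// MAP_NORESERVE: the backing pages are discarded but the address range
// stays reserved and can be committed again later.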
3181 bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
3182   uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
3183                                      MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
  return res != (uintptr_t) MAP_FAILED;
3185 }
3186 
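// Binary-search [bottom, bottom + size) for the lowest page that is still
// mapped, probing with mincore(). The returned address is the bottom of the
// mapped (committed) part of the primordial thread's stack.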
3187 static address get_stack_commited_bottom(address bottom, size_t size) {
3188   address nbot = bottom;
3189   address ntop = bottom + size;
3190 
3191   size_t page_sz = os::vm_page_size();
3192   unsigned pages = size / page_sz;
3193 
3194   unsigned char vec[1];
3195   unsigned imin = 1, imax = pages + 1, imid;
3196   int mincore_return_value = 0;
3197 
3198   assert(imin <= imax, "Unexpected page size");
3199 
3200   while (imin < imax) {
3201     imid = (imax + imin) / 2;
3202     nbot = ntop - (imid * page_sz);
3203 
    // Use a trick with mincore to check whether the page is mapped or not.
    // mincore sets vec to 1 if the page resides in memory and to 0 if the page
    // is swapped out, but if the page we are asking for is unmapped
    // it fails with -1 and errno set to ENOMEM.
3208     mincore_return_value = mincore(nbot, page_sz, vec);
3209 
3210     if (mincore_return_value == -1) {
      // Page is not mapped; go up
      // to find the first mapped page.
3213       if (errno != EAGAIN) {
3214         assert(errno == ENOMEM, "Unexpected mincore errno");
3215         imax = imid;
3216       }
3217     } else {
      // Page is mapped; go down
      // to find the first unmapped page.
3220       imin = imid + 1;
3221     }
3222   }
3223 
3224   nbot = nbot + page_sz;
3225 
3226   // Adjust stack bottom one page up if last checked page is not mapped
3227   if (mincore_return_value == -1) {
3228     nbot = nbot + page_sz;
3229   }
3230 
3231   return nbot;
3232 }
3233 
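// Scan [start, start + size) with mincore() in stripes of 1024 pages and
// report the first contiguous run of committed pages. Returns true and sets
// committed_start/committed_size if such a run exists, false otherwise.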
3234 bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
3235   int mincore_return_value;
3236   const size_t stripe = 1024;  // query this many pages each time
3237   unsigned char vec[stripe + 1];
3238   // set a guard
3239   vec[stripe] = 'X';
3240 
3241   const size_t page_sz = os::vm_page_size();
3242   size_t pages = size / page_sz;
3243 
3244   assert(is_aligned(start, page_sz), "Start address must be page aligned");
3245   assert(is_aligned(size, page_sz), "Size must be page aligned");
3246 
3247   committed_start = NULL;
3248 
3249   int loops = (pages + stripe - 1) / stripe;
3250   int committed_pages = 0;
3251   address loop_base = start;
3252   bool found_range = false;
3253 
3254   for (int index = 0; index < loops && !found_range; index ++) {
3255     assert(pages > 0, "Nothing to do");
3256     int pages_to_query = (pages >= stripe) ? stripe : pages;
3257     pages -= pages_to_query;
3258 
3259     // Get stable read
3260     while ((mincore_return_value = mincore(loop_base, pages_to_query * page_sz, vec)) == -1 && errno == EAGAIN);
3261 
    // During shutdown, some memory goes away without properly notifying NMT,
    // e.g. ConcurrentGCThread/WatcherThread can exit without deleting the thread object.
    // Bail out and report the range as not committed for now.
3265     if (mincore_return_value == -1 && errno == ENOMEM) {
3266       return false;
3267     }
3268 
3269     assert(vec[stripe] == 'X', "overflow guard");
3270     assert(mincore_return_value == 0, "Range must be valid");
3271     // Process this stripe
3272     for (int vecIdx = 0; vecIdx < pages_to_query; vecIdx ++) {
3273       if ((vec[vecIdx] & 0x01) == 0) { // not committed
3274         // End of current contiguous region
3275         if (committed_start != NULL) {
3276           found_range = true;
3277           break;
3278         }
3279       } else { // committed
3280         // Start of region
3281         if (committed_start == NULL) {
3282           committed_start = loop_base + page_sz * vecIdx;
3283         }
3284         committed_pages ++;
3285       }
3286     }
3287 
3288     loop_base += pages_to_query * page_sz;
3289   }
3290 
3291   if (committed_start != NULL) {
3292     assert(committed_pages > 0, "Must have committed region");
3293     assert(committed_pages <= int(size / page_sz), "Can not commit more than it has");
3294     assert(committed_start >= start && committed_start < start + size, "Out of range");
3295     committed_size = page_sz * committed_pages;
3296     return true;
3297   } else {
3298     assert(committed_pages == 0, "Should not have committed region");
3299     return false;
3300   }
3301 }
3302 
3303 
3304 // Linux uses a growable mapping for the stack, and if the mapping for
3305 // the stack guard pages is not removed when we detach a thread the
3306 // stack cannot grow beyond the pages where the stack guard was
3307 // mapped.  If at some point later in the process the stack expands to
3308 // that point, the Linux kernel cannot expand the stack any further
3309 // because the guard pages are in the way, and a segfault occurs.
3310 //
3311 // However, it's essential not to split the stack region by unmapping
3312 // a region (leaving a hole) that's already part of the stack mapping,
3313 // so if the stack mapping has already grown beyond the guard pages at
3314 // the time we create them, we have to truncate the stack mapping.
3315 // So, we need to know the extent of the stack mapping when
3316 // create_stack_guard_pages() is called.
3317 
3318 // We only need this for stacks that are growable: at the time of
3319 // writing thread stacks don't use growable mappings (i.e. those
// created with MAP_GROWSDOWN), and aren't marked "[stack]", so this
3321 // only applies to the main thread.
3322 
3323 // If the (growable) stack mapping already extends beyond the point
3324 // where we're going to put our guard pages, truncate the mapping at
3325 // that point by munmap()ping it.  This ensures that when we later
3326 // munmap() the guard pages we don't leave a hole in the stack
// mapping. This only affects the main/primordial thread.
3328 
3329 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3330   if (os::is_primordial_thread()) {
    // As we manually grow the stack up to the bottom inside create_attached_thread(),
    // it's likely that os::Linux::initial_thread_stack_bottom is mapped and
    // we don't need to do anything special.
    // Check that first, before calling the more expensive get_stack_commited_bottom().
3335     uintptr_t stack_extent = (uintptr_t) os::Linux::initial_thread_stack_bottom();
3336     unsigned char vec[1];
3337 
3338     if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) {
3339       // Fallback to slow path on all errors, including EAGAIN
3340       assert((uintptr_t)addr >= stack_extent,
3341              "Sanity: addr should be larger than extent, " PTR_FORMAT " >= " PTR_FORMAT,
3342              p2i(addr), stack_extent);
3343       stack_extent = (uintptr_t) get_stack_commited_bottom(
3344                                                            os::Linux::initial_thread_stack_bottom(),
3345                                                            (size_t)addr - stack_extent);
3346     }
3347 
3348     if (stack_extent < (uintptr_t)addr) {
3349       ::munmap((void*)stack_extent, (uintptr_t)(addr - stack_extent));
3350     }
3351   }
3352 
3353   return os::commit_memory(addr, size, !ExecMem);
3354 }
3355 
3356 // If this is a growable mapping, remove the guard pages entirely by
3357 // munmap()ping them.  If not, just call uncommit_memory(). This only
3358 // affects the main/primordial thread, but guard against future OS changes.
// It's safe to always unmap the guard pages of the primordial thread because we
// always place them right after the end of the mapped region.
3361 
3362 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3365   if (os::is_primordial_thread()) {
3366     return ::munmap(addr, size) == 0;
3367   }
3368 
3369   return os::uncommit_memory(addr, size);
3370 }
3371 
3372 // 'requested_addr' is only treated as a hint, the return value may or
3373 // may not start from the requested address. Unlike Linux mmap(), this
3374 // function returns NULL to indicate failure.
3375 static char* anon_mmap(char* requested_addr, size_t bytes) {
3376   // MAP_FIXED is intentionally left out, to leave existing mappings intact.
3377   const int flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
3378 
3379   // Map reserved/uncommitted pages PROT_NONE so we fail early if we
3380   // touch an uncommitted page. Otherwise, the read/write might
3381   // succeed if we have enough swap space to back the physical page.
3382   char* addr = (char*)::mmap(requested_addr, bytes, PROT_NONE, flags, -1, 0);
3383 
3384   return addr == MAP_FAILED ? NULL : addr;
3385 }
3386 
3387 // Allocate (using mmap, NO_RESERVE, with small pages) at either a given request address
3388 //   (req_addr != NULL) or with a given alignment.
3389 //  - bytes shall be a multiple of alignment.
3390 //  - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
3391 //  - alignment sets the alignment at which memory shall be allocated.
3392 //     It must be a multiple of allocation granularity.
3393 // Returns address of memory or NULL. If req_addr was not NULL, will only return
3394 //  req_addr or NULL.
3395 static char* anon_mmap_aligned(char* req_addr, size_t bytes, size_t alignment) {
3396   size_t extra_size = bytes;
3397   if (req_addr == NULL && alignment > 0) {
3398     extra_size += alignment;
3399   }
3400 
3401   char* start = anon_mmap(req_addr, extra_size);
3402   if (start != NULL) {
3403     if (req_addr != NULL) {
3404       if (start != req_addr) {
3405         ::munmap(start, extra_size);
3406         start = NULL;
3407       }
3408     } else {
3409       char* const start_aligned = align_up(start, alignment);
3410       char* const end_aligned = start_aligned + bytes;
3411       char* const end = start + extra_size;
3412       if (start_aligned > start) {
3413         ::munmap(start, start_aligned - start);
3414       }
3415       if (end_aligned < end) {
3416         ::munmap(end_aligned, end - end_aligned);
3417       }
3418       start = start_aligned;
3419     }
3420   }
3421   return start;
3422 }
3423 
3424 static int anon_munmap(char * addr, size_t size) {
3425   return ::munmap(addr, size) == 0;
3426 }
3427 
3428 char* os::pd_reserve_memory(size_t bytes, bool exec) {
3429   return anon_mmap(NULL, bytes);
3430 }
3431 
3432 bool os::pd_release_memory(char* addr, size_t size) {
3433   return anon_munmap(addr, size);
3434 }
3435 
3436 #ifdef CAN_SHOW_REGISTERS_ON_ASSERT
3437 extern char* g_assert_poison; // assertion poison page address
3438 #endif
3439 
3440 static bool linux_mprotect(char* addr, size_t size, int prot) {
3441   // Linux wants the mprotect address argument to be page aligned.
3442   char* bottom = (char*)align_down((intptr_t)addr, os::Linux::page_size());
3443 
3444   // According to SUSv3, mprotect() should only be used with mappings
3445   // established by mmap(), and mmap() always maps whole pages. Unaligned
3446   // 'addr' likely indicates problem in the VM (e.g. trying to change
3447   // protection of malloc'ed or statically allocated memory). Check the
3448   // caller if you hit this assert.
3449   assert(addr == bottom, "sanity check");
3450 
3451   size = align_up(pointer_delta(addr, bottom, 1) + size, os::Linux::page_size());
3452   // Don't log anything if we're executing in the poison page signal handling
3453   // context. It can lead to reentrant use of other parts of the VM code.
3454 #ifdef CAN_SHOW_REGISTERS_ON_ASSERT
3455   if (addr != g_assert_poison)
3456 #endif
3457   Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(bottom), p2i(bottom+size), prot);
3458   return ::mprotect(bottom, size, prot) == 0;
3459 }
3460 
3461 // Set protections specified
3462 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3463                         bool is_committed) {
3464   unsigned int p = 0;
3465   switch (prot) {
3466   case MEM_PROT_NONE: p = PROT_NONE; break;
3467   case MEM_PROT_READ: p = PROT_READ; break;
3468   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
3469   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
3470   default:
3471     ShouldNotReachHere();
3472   }
3473   // is_committed is unused.
3474   return linux_mprotect(addr, bytes, p);
3475 }
3476 
3477 bool os::guard_memory(char* addr, size_t size) {
3478   return linux_mprotect(addr, size, PROT_NONE);
3479 }
3480 
3481 bool os::unguard_memory(char* addr, size_t size) {
3482   return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
3483 }
3484 
3485 bool os::Linux::transparent_huge_pages_sanity_check(bool warn,
3486                                                     size_t page_size) {
3487   bool result = false;
3488   void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
3489                  MAP_ANONYMOUS|MAP_PRIVATE,
3490                  -1, 0);
3491   if (p != MAP_FAILED) {
3492     void *aligned_p = align_up(p, page_size);
3493 
3494     result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0;
3495 
3496     munmap(p, page_size * 2);
3497   }
3498 
3499   if (warn && !result) {
3500     warning("TransparentHugePages is not supported by the operating system.");
3501   }
3502 
3503   return result;
3504 }
3505 
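// Encode an explicit huge page size into the mmap() flag bits [26:31] as
// log2(page_size) << MAP_HUGE_SHIFT. For example a 2M page is encoded as
// 21 << MAP_HUGE_SHIFT; the default huge page size needs no extra flag.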
3506 int os::Linux::hugetlbfs_page_size_flag(size_t page_size) {
3507   if (page_size != default_large_page_size()) {
3508     return (exact_log2(page_size) << MAP_HUGE_SHIFT);
3509   }
3510   return 0;
3511 }
3512 
3513 bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
3514   // Include the page size flag to ensure we sanity check the correct page size.
3515   int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size);
3516   void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE, flags, -1, 0);
3517 
3518   if (p != MAP_FAILED) {
3519     // Mapping succeeded, sanity check passed.
3520     munmap(p, page_size);
3521     return true;
3522   } else {
3523       log_info(pagesize)("Large page size (" SIZE_FORMAT "%s) failed sanity check, "
3524                          "checking if smaller large page sizes are usable",
3525                          byte_size_in_exact_unit(page_size),
3526                          exact_unit_for_byte_size(page_size));
3527       for (size_t page_size_ = _page_sizes.next_smaller(page_size);
3528           page_size_ != (size_t)os::vm_page_size();
3529           page_size_ = _page_sizes.next_smaller(page_size_)) {
3530         flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size_);
3531         p = mmap(NULL, page_size_, PROT_READ|PROT_WRITE, flags, -1, 0);
3532         if (p != MAP_FAILED) {
3533           // Mapping succeeded, sanity check passed.
3534           munmap(p, page_size_);
3535           log_info(pagesize)("Large page size (" SIZE_FORMAT "%s) passed sanity check",
3536                              byte_size_in_exact_unit(page_size_),
3537                              exact_unit_for_byte_size(page_size_));
3538           return true;
3539         }
3540       }
3541   }
3542 
3543   if (warn) {
3544     warning("HugeTLBFS is not configured or not supported by the operating system.");
3545   }
3546 
3547   return false;
3548 }
3549 
3550 bool os::Linux::shm_hugetlbfs_sanity_check(bool warn, size_t page_size) {
3551   // Try to create a large shared memory segment.
3552   int shmid = shmget(IPC_PRIVATE, page_size, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
3553   if (shmid == -1) {
3554     // Possible reasons for shmget failure:
3555     // 1. shmmax is too small for the request.
3556     //    > check shmmax value: cat /proc/sys/kernel/shmmax
3557     //    > increase shmmax value: echo "new_value" > /proc/sys/kernel/shmmax
3558     // 2. not enough large page memory.
3559     //    > check available large pages: cat /proc/meminfo
3560     //    > increase amount of large pages:
3561     //          sysctl -w vm.nr_hugepages=new_value
3562     //    > For more information regarding large pages please refer to:
3563     //      https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
3564     if (warn) {
3565       warning("Large pages using UseSHM are not configured on this system.");
3566     }
3567     return false;
3568   }
3569   // Managed to create a segment, now delete it.
3570   shmctl(shmid, IPC_RMID, NULL);
3571   return true;
3572 }
3573 
3574 // From the coredump_filter documentation:
3575 //
3576 // - (bit 0) anonymous private memory
3577 // - (bit 1) anonymous shared memory
3578 // - (bit 2) file-backed private memory
3579 // - (bit 3) file-backed shared memory
3580 // - (bit 4) ELF header pages in file-backed private memory areas (it is
3581 //           effective only if the bit 2 is cleared)
3582 // - (bit 5) hugetlb private memory
3583 // - (bit 6) hugetlb shared memory
3584 // - (bit 7) dax private memory
3585 // - (bit 8) dax shared memory
3586 //
3587 static void set_coredump_filter(CoredumpFilterBit bit) {
3588   FILE *f;
3589   long cdm;
3590 
3591   if ((f = os::fopen("/proc/self/coredump_filter", "r+")) == NULL) {
3592     return;
3593   }
3594 
3595   if (fscanf(f, "%lx", &cdm) != 1) {
3596     fclose(f);
3597     return;
3598   }
3599 
3600   long saved_cdm = cdm;
3601   rewind(f);
3602   cdm |= bit;
3603 
3604   if (cdm != saved_cdm) {
3605     fprintf(f, "%#lx", cdm);
3606   }
3607 
3608   fclose(f);
3609 }
3610 
3611 // Large page support
3612 
3613 static size_t _large_page_size = 0;
3614 
3615 static size_t scan_default_large_page_size() {
3616   size_t default_large_page_size = 0;
3617 
  // large_page_size on Linux is used to round up heap size. x86 uses either
  // 2M or 4M pages, depending on whether PAE (Physical Address Extensions)
  // mode is enabled. AMD64/EM64T uses 2M pages in 64-bit mode. IA64 can use
  // pages as large as 1G.
3622   //
3623   // Here we try to figure out page size by parsing /proc/meminfo and looking
3624   // for a line with the following format:
3625   //    Hugepagesize:     2048 kB
3626   //
  // If we can't determine the value (e.g. /proc is not mounted, or the text
  // format has been changed), we'll return a default large page size of 0.
3629 
3630   FILE *fp = os::fopen("/proc/meminfo", "r");
3631   if (fp) {
3632     while (!feof(fp)) {
3633       int x = 0;
3634       char buf[16];
3635       if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
3636         if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
3637           default_large_page_size = x * K;
3638           break;
3639         }
3640       } else {
3641         // skip to next line
3642         for (;;) {
3643           int ch = fgetc(fp);
3644           if (ch == EOF || ch == (int)'\n') break;
3645         }
3646       }
3647     }
3648     fclose(fp);
3649   }
3650 
3651   return default_large_page_size;
3652 }
3653 
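// Each supported huge page size shows up as a directory named like
// /sys/kernel/mm/hugepages/hugepages-2048kB; the size is parsed out of the
// directory name below.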
3654 static os::PageSizes scan_multiple_page_support() {
3655   // Scan /sys/kernel/mm/hugepages
3656   // to discover the available page sizes
3657   const char* sys_hugepages = "/sys/kernel/mm/hugepages";
3658   os::PageSizes page_sizes;
3659 
3660   DIR *dir = opendir(sys_hugepages);
3661 
3662   struct dirent *entry;
3663   size_t page_size;
3664   while ((entry = readdir(dir)) != NULL) {
3665     if (entry->d_type == DT_DIR &&
3666         sscanf(entry->d_name, "hugepages-%zukB", &page_size) == 1) {
3667       // The kernel is using kB, hotspot uses bytes
3668       // Add each found Large Page Size to page_sizes
3669       page_sizes.add(page_size * K);
3670     }
3671   }
3672   closedir(dir);
3673 
3674   LogTarget(Debug, pagesize) lt;
3675   if (lt.is_enabled()) {
3676     LogStream ls(lt);
3677     ls.print("Large Page sizes: ");
3678     page_sizes.print_on(&ls);
3679   }
3680 
3681   return page_sizes;
3682 }
3683 
3684 size_t os::Linux::default_large_page_size() {
3685   return _default_large_page_size;
3686 }
3687 
3688 void warn_no_large_pages_configured() {
3689   if (!FLAG_IS_DEFAULT(UseLargePages)) {
3690     log_warning(pagesize)("UseLargePages disabled, no large pages configured and available on the system.");
3691   }
3692 }
3693 
3694 bool os::Linux::setup_large_page_type(size_t page_size) {
3695   if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
3696       FLAG_IS_DEFAULT(UseSHM) &&
3697       FLAG_IS_DEFAULT(UseTransparentHugePages)) {
3698 
3699     // The type of large pages has not been specified by the user.
3700 
3701     // Try UseHugeTLBFS and then UseSHM.
3702     UseHugeTLBFS = UseSHM = true;
3703 
3704     // Don't try UseTransparentHugePages since there are known
3705     // performance issues with it turned on. This might change in the future.
3706     UseTransparentHugePages = false;
3707   }
3708 
3709   if (UseTransparentHugePages) {
3710     bool warn_on_failure = !FLAG_IS_DEFAULT(UseTransparentHugePages);
3711     if (transparent_huge_pages_sanity_check(warn_on_failure, page_size)) {
3712       UseHugeTLBFS = false;
3713       UseSHM = false;
3714       return true;
3715     }
3716     UseTransparentHugePages = false;
3717   }
3718 
3719   if (UseHugeTLBFS) {
3720     bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
3721     if (hugetlbfs_sanity_check(warn_on_failure, page_size)) {
3722       UseSHM = false;
3723       return true;
3724     }
3725     UseHugeTLBFS = false;
3726   }
3727 
3728   if (UseSHM) {
3729     bool warn_on_failure = !FLAG_IS_DEFAULT(UseSHM);
3730     if (shm_hugetlbfs_sanity_check(warn_on_failure, page_size)) {
3731       return true;
3732     }
3733     UseSHM = false;
3734   }
3735 
3736   warn_no_large_pages_configured();
3737   return false;
3738 }
3739 
3740 void os::large_page_init() {
3741   // 1) Handle the case where we do not want to use huge pages and hence
3742   //    there is no need to scan the OS for related info
3743   if (!UseLargePages &&
3744       !UseTransparentHugePages &&
3745       !UseHugeTLBFS &&
3746       !UseSHM) {
3747     // Not using large pages.
3748     return;
3749   }
3750 
3751   if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) {
3752     // The user explicitly turned off large pages.
3753     // Ignore the rest of the large pages flags.
3754     UseTransparentHugePages = false;
3755     UseHugeTLBFS = false;
3756     UseSHM = false;
3757     return;
3758   }
3759 
3760   // 2) Scan OS info
3761   size_t default_large_page_size = scan_default_large_page_size();
3762   os::Linux::_default_large_page_size = default_large_page_size;
3763   if (default_large_page_size == 0) {
3764     // No large pages configured, return.
3765     warn_no_large_pages_configured();
3766     UseLargePages = false;
3767     UseTransparentHugePages = false;
3768     UseHugeTLBFS = false;
3769     UseSHM = false;
3770     return;
3771   }
3772   os::PageSizes all_large_pages = scan_multiple_page_support();
3773 
3774   // 3) Consistency check and post-processing
3775 
3776   // It is unclear if /sys/kernel/mm/hugepages/ and /proc/meminfo could disagree. Manually
3777   // re-add the default page size to the list of page sizes to be sure.
3778   all_large_pages.add(default_large_page_size);
3779 
  // Check that LargePageSizeInBytes matches an available page size and, if so, set
  // _large_page_size using LargePageSizeInBytes as the maximum allowed large page size.
  // If LargePageSizeInBytes doesn't match an available page size, set _large_page_size
  // to default_large_page_size and use it as the maximum.
  if (FLAG_IS_DEFAULT(LargePageSizeInBytes) ||
3785       LargePageSizeInBytes == 0 ||
3786       LargePageSizeInBytes == default_large_page_size) {
3787     _large_page_size = default_large_page_size;
3788     log_info(pagesize)("Using the default large page size: " SIZE_FORMAT "%s",
3789                        byte_size_in_exact_unit(_large_page_size),
3790                        exact_unit_for_byte_size(_large_page_size));
3791   } else {
3792     if (all_large_pages.contains(LargePageSizeInBytes)) {
3793       _large_page_size = LargePageSizeInBytes;
3794       log_info(pagesize)("Overriding default large page size (" SIZE_FORMAT "%s) "
3795                          "using LargePageSizeInBytes: " SIZE_FORMAT "%s",
3796                          byte_size_in_exact_unit(default_large_page_size),
3797                          exact_unit_for_byte_size(default_large_page_size),
3798                          byte_size_in_exact_unit(_large_page_size),
3799                          exact_unit_for_byte_size(_large_page_size));
3800     } else {
3801       _large_page_size = default_large_page_size;
3802       log_info(pagesize)("LargePageSizeInBytes is not a valid large page size (" SIZE_FORMAT "%s) "
3803                          "using the default large page size: " SIZE_FORMAT "%s",
3804                          byte_size_in_exact_unit(LargePageSizeInBytes),
3805                          exact_unit_for_byte_size(LargePageSizeInBytes),
3806                          byte_size_in_exact_unit(_large_page_size),
3807                          exact_unit_for_byte_size(_large_page_size));
3808     }
3809   }
3810 
3811   // Populate _page_sizes with large page sizes less than or equal to
3812   // _large_page_size.
3813   for (size_t page_size = _large_page_size; page_size != 0;
3814          page_size = all_large_pages.next_smaller(page_size)) {
3815     _page_sizes.add(page_size);
3816   }
3817 
3818   LogTarget(Info, pagesize) lt;
3819   if (lt.is_enabled()) {
3820     LogStream ls(lt);
3821     ls.print("Usable page sizes: ");
3822     _page_sizes.print_on(&ls);
3823   }
3824 
3825   // Now determine the type of large pages to use:
3826   UseLargePages = os::Linux::setup_large_page_type(_large_page_size);
3827 
3828   set_coredump_filter(LARGEPAGES_BIT);
3829 }
3830 
3831 #ifndef SHM_HUGETLB
3832   #define SHM_HUGETLB 04000
3833 #endif
3834 
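// Warn about SHM large page failures only when large pages are enabled and the
// user explicitly set UseLargePages, UseSHM or LargePageSizeInBytes; otherwise
// stay silent.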
3835 #define shm_warning_format(format, ...)              \
3836   do {                                               \
3837     if (UseLargePages &&                             \
3838         (!FLAG_IS_DEFAULT(UseLargePages) ||          \
3839          !FLAG_IS_DEFAULT(UseSHM) ||                 \
3840          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {  \
3841       warning(format, __VA_ARGS__);                  \
3842     }                                                \
3843   } while (0)
3844 
3845 #define shm_warning(str) shm_warning_format("%s", str)
3846 
3847 #define shm_warning_with_errno(str)                \
3848   do {                                             \
3849     int err = errno;                               \
3850     shm_warning_format(str " (error = %d)", err);  \
3851   } while (0)
3852 
3853 static char* shmat_with_alignment(int shmid, size_t bytes, size_t alignment) {
3854   assert(is_aligned(bytes, alignment), "Must be divisible by the alignment");
3855 
3856   if (!is_aligned(alignment, SHMLBA)) {
3857     assert(false, "Code below assumes that alignment is at least SHMLBA aligned");
3858     return NULL;
3859   }
3860 
3861   // To ensure that we get 'alignment' aligned memory from shmat,
3862   // we pre-reserve aligned virtual memory and then attach to that.
3863 
3864   char* pre_reserved_addr = anon_mmap_aligned(NULL /* req_addr */, bytes, alignment);
3865   if (pre_reserved_addr == NULL) {
3866     // Couldn't pre-reserve aligned memory.
3867     shm_warning("Failed to pre-reserve aligned memory for shmat.");
3868     return NULL;
3869   }
3870 
3871   // SHM_REMAP is needed to allow shmat to map over an existing mapping.
3872   char* addr = (char*)shmat(shmid, pre_reserved_addr, SHM_REMAP);
3873 
3874   if ((intptr_t)addr == -1) {
3875     int err = errno;
3876     shm_warning_with_errno("Failed to attach shared memory.");
3877 
3878     assert(err != EACCES, "Unexpected error");
3879     assert(err != EIDRM,  "Unexpected error");
3880     assert(err != EINVAL, "Unexpected error");
3881 
3882     // Since we don't know if the kernel unmapped the pre-reserved memory area
3883     // we can't unmap it, since that would potentially unmap memory that was
3884     // mapped from other threads.
3885     return NULL;
3886   }
3887 
3888   return addr;
3889 }
3890 
3891 static char* shmat_at_address(int shmid, char* req_addr) {
3892   if (!is_aligned(req_addr, SHMLBA)) {
3893     assert(false, "Requested address needs to be SHMLBA aligned");
3894     return NULL;
3895   }
3896 
3897   char* addr = (char*)shmat(shmid, req_addr, 0);
3898 
3899   if ((intptr_t)addr == -1) {
3900     shm_warning_with_errno("Failed to attach shared memory.");
3901     return NULL;
3902   }
3903 
3904   return addr;
3905 }
3906 
3907 static char* shmat_large_pages(int shmid, size_t bytes, size_t alignment, char* req_addr) {
3908   // If a req_addr has been provided, we assume that the caller has already aligned the address.
3909   if (req_addr != NULL) {
3910     assert(is_aligned(req_addr, os::large_page_size()), "Must be divisible by the large page size");
3911     assert(is_aligned(req_addr, alignment), "Must be divisible by given alignment");
3912     return shmat_at_address(shmid, req_addr);
3913   }
3914 
3915   // Since shmid has been setup with SHM_HUGETLB, shmat will automatically
3916   // return large page size aligned memory addresses when req_addr == NULL.
3917   // However, if the alignment is larger than the large page size, we have
3918   // to manually ensure that the memory returned is 'alignment' aligned.
3919   if (alignment > os::large_page_size()) {
3920     assert(is_aligned(alignment, os::large_page_size()), "Must be divisible by the large page size");
3921     return shmat_with_alignment(shmid, bytes, alignment);
3922   } else {
3923     return shmat_at_address(shmid, NULL);
3924   }
3925 }
3926 
3927 char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment,
3928                                             char* req_addr, bool exec) {
  // "exec" is passed in but not used. When creating the shared image for
  // the code cache there is no SHM_X executable permission to check.
3931   assert(UseLargePages && UseSHM, "only for SHM large pages");
3932   assert(is_aligned(req_addr, os::large_page_size()), "Unaligned address");
3933   assert(is_aligned(req_addr, alignment), "Unaligned address");
3934 
3935   if (!is_aligned(bytes, os::large_page_size())) {
3936     return NULL; // Fallback to small pages.
3937   }
3938 
3939   // Create a large shared memory region to attach to based on size.
3940   // Currently, size is the total size of the heap.
3941   int shmid = shmget(IPC_PRIVATE, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
3942   if (shmid == -1) {
3943     // Possible reasons for shmget failure:
3944     // 1. shmmax is too small for the request.
3945     //    > check shmmax value: cat /proc/sys/kernel/shmmax
3946     //    > increase shmmax value: echo "new_value" > /proc/sys/kernel/shmmax
3947     // 2. not enough large page memory.
3948     //    > check available large pages: cat /proc/meminfo
3949     //    > increase amount of large pages:
3950     //          sysctl -w vm.nr_hugepages=new_value
3951     //    > For more information regarding large pages please refer to:
3952     //      https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
    //      Note 1: different Linux distributions may use a different name for
    //            this property, e.g. on Redhat AS-3 it is "hugetlb_pool".
    //      Note 2: it's possible there's enough physical memory available, but
    //            it is so fragmented after a long run that it can't be
    //            coalesced into large pages. Try to reserve large pages when
    //            the system is still "fresh".
3959     shm_warning_with_errno("Failed to reserve shared memory.");
3960     return NULL;
3961   }
3962 
3963   // Attach to the region.
3964   char* addr = shmat_large_pages(shmid, bytes, alignment, req_addr);
3965 
3966   // Remove shmid. If shmat() is successful, the actual shared memory segment
3967   // will be deleted when it's detached by shmdt() or when the process
3968   // terminates. If shmat() is not successful this will remove the shared
3969   // segment immediately.
3970   shmctl(shmid, IPC_RMID, NULL);
3971 
3972   return addr;
3973 }
3974 
3975 static void log_on_commit_special_failure(char* req_addr, size_t bytes,
3976                                            size_t page_size, int error) {
3977   assert(error == ENOMEM, "Only expect to fail if no memory is available");
3978 
3979   log_info(pagesize)("Failed to reserve and commit memory with given page size. req_addr: " PTR_FORMAT
3980                      " size: " SIZE_FORMAT "%s, page size: " SIZE_FORMAT "%s, (errno = %d)",
3981                      p2i(req_addr), byte_size_in_exact_unit(bytes), exact_unit_for_byte_size(bytes),
3982                      byte_size_in_exact_unit(page_size), exact_unit_for_byte_size(page_size), error);
3983 }
3984 
3985 bool os::Linux::commit_memory_special(size_t bytes,
3986                                       size_t page_size,
3987                                       char* req_addr,
3988                                       bool exec) {
3989   assert(UseLargePages && UseHugeTLBFS, "Should only get here when HugeTLBFS large pages are used");
3990   assert(is_aligned(bytes, page_size), "Unaligned size");
3991   assert(is_aligned(req_addr, page_size), "Unaligned address");
3992   assert(req_addr != NULL, "Must have a requested address for special mappings");
3993 
3994   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
3995   int flags = MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED;
3996 
3997   // For large pages additional flags are required.
3998   if (page_size > (size_t) os::vm_page_size()) {
3999     flags |= MAP_HUGETLB | hugetlbfs_page_size_flag(page_size);
4000   }
4001   char* addr = (char*)::mmap(req_addr, bytes, prot, flags, -1, 0);
4002 
4003   if (addr == MAP_FAILED) {
4004     log_on_commit_special_failure(req_addr, bytes, page_size, errno);
4005     return false;
4006   }
4007 
4008   log_debug(pagesize)("Commit special mapping: " PTR_FORMAT ", size=" SIZE_FORMAT "%s, page size="
4009                       SIZE_FORMAT "%s",
4010                       p2i(addr), byte_size_in_exact_unit(bytes),
4011                       exact_unit_for_byte_size(bytes),
4012                       byte_size_in_exact_unit(page_size),
4013                       exact_unit_for_byte_size(page_size));
4014   assert(is_aligned(addr, page_size), "Must be");
4015   return true;
4016 }
4017 
4018 char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes,
4019                                                    size_t alignment,
4020                                                    size_t page_size,
4021                                                    char* req_addr,
4022                                                    bool exec) {
4023   assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
4024   assert(is_aligned(req_addr, alignment), "Must be");
4025   assert(is_aligned(req_addr, page_size), "Must be");
4026   assert(is_aligned(alignment, os::vm_allocation_granularity()), "Must be");
4027   assert(_page_sizes.contains(page_size), "Must be a valid page size");
4028   assert(page_size > (size_t)os::vm_page_size(), "Must be a large page size");
4029   assert(bytes >= page_size, "Shouldn't allocate large pages for small sizes");
4030 
4031   // We only end up here when at least 1 large page can be used.
4032   // If the size is not a multiple of the large page size, we
  // will mix the type of pages used, but in a descending order.
4034   // Start off by reserving a range of the given size that is
4035   // properly aligned. At this point no pages are committed. If
4036   // a requested address is given it will be used and it must be
4037   // aligned to both the large page size and the given alignment.
4038   // The larger of the two will be used.
  size_t required_alignment = MAX2(page_size, alignment);
4040   char* const aligned_start = anon_mmap_aligned(req_addr, bytes, required_alignment);
4041   if (aligned_start == NULL) {
4042     return NULL;
4043   }
4044 
4045   // First commit using large pages.
4046   size_t large_bytes = align_down(bytes, page_size);
4047   bool large_committed = commit_memory_special(large_bytes, page_size, aligned_start, exec);
4048 
4049   if (large_committed && bytes == large_bytes) {
4050     // The size was large page aligned so no additional work is
4051     // needed even if the commit failed.
4052     return aligned_start;
4053   }
4054 
4055   // The requested size requires some small pages as well.
4056   char* small_start = aligned_start + large_bytes;
4057   size_t small_size = bytes - large_bytes;
4058   if (!large_committed) {
    // Failed to commit large pages, so we need to unmap the
    // remainder of the original reservation.
4061     ::munmap(small_start, small_size);
4062     return NULL;
4063   }
4064 
4065   // Commit the remaining bytes using small pages.
4066   bool small_committed = commit_memory_special(small_size, os::vm_page_size(), small_start, exec);
4067   if (!small_committed) {
4068     // Failed to commit the remaining size, need to unmap
4069     // the large pages part of the reservation.
4070     ::munmap(aligned_start, large_bytes);
4071     return NULL;
4072   }
4073   return aligned_start;
4074 }
4075 
4076 char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size,
4077                                     char* req_addr, bool exec) {
4078   assert(UseLargePages, "only for large pages");
4079 
4080   char* addr;
4081   if (UseSHM) {
4082     // No support for using specific page sizes with SHM.
4083     addr = os::Linux::reserve_memory_special_shm(bytes, alignment, req_addr, exec);
4084   } else {
4085     assert(UseHugeTLBFS, "must be");
4086     addr = os::Linux::reserve_memory_special_huge_tlbfs(bytes, alignment, page_size, req_addr, exec);
4087   }
4088 
4089   if (addr != NULL) {
4090     if (UseNUMAInterleaving) {
4091       numa_make_global(addr, bytes);
4092     }
4093   }
4094 
4095   return addr;
4096 }
4097 
4098 bool os::Linux::release_memory_special_shm(char* base, size_t bytes) {
4099   // detaching the SHM segment will also delete it, see reserve_memory_special_shm()
4100   return shmdt(base) == 0;
4101 }
4102 
4103 bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
4104   return pd_release_memory(base, bytes);
4105 }
4106 
4107 bool os::pd_release_memory_special(char* base, size_t bytes) {
4108   assert(UseLargePages, "only for large pages");
4109   bool res;
4110 
4111   if (UseSHM) {
4112     res = os::Linux::release_memory_special_shm(base, bytes);
4113   } else {
4114     assert(UseHugeTLBFS, "must be");
4115     res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
4116   }
4117   return res;
4118 }
4119 
4120 size_t os::large_page_size() {
4121   return _large_page_size;
4122 }
4123 
4124 // With SysV SHM the entire memory region must be allocated as shared
4125 // memory.
4126 // HugeTLBFS allows applications to commit large page memory on demand.
4127 // However, when committing memory with HugeTLBFS fails, the region
4128 // that was supposed to be committed will lose the old reservation
4129 // and allow other threads to steal that memory region. Because of this
4130 // behavior we can't commit HugeTLBFS memory.
4131 bool os::can_commit_large_page_memory() {
4132   return UseTransparentHugePages;
4133 }
4134 
4135 bool os::can_execute_large_page_memory() {
4136   return UseTransparentHugePages || UseHugeTLBFS;
4137 }
4138 
4139 char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
4140   assert(file_desc >= 0, "file_desc is not valid");
4141   char* result = pd_attempt_reserve_memory_at(requested_addr, bytes, !ExecMem);
4142   if (result != NULL) {
4143     if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
4144       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
4145     }
4146   }
4147   return result;
4148 }
4149 
4150 // Reserve memory at an arbitrary address, only if that area is
4151 // available (and not reserved for something else).
4152 
4153 char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec) {
4154   // Assert only that the size is a multiple of the page size, since
4155   // that's all that mmap requires, and since that's all we really know
4156   // about at this low abstraction level.  If we need higher alignment,
4157   // we can either pass an alignment to this method or verify alignment
4158   // in one of the methods further up the call chain.  See bug 5044738.
4159   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
4160 
4161   // Try to allocate the block at the requested address; we do not search
4162   // for an alternative spot if mmap places it elsewhere.
4163 
4164   // Linux mmap allows the caller to pass an address as a hint; give it a try
4165   // first. If the kernel honors the hint then we can return immediately.
4166   char * addr = anon_mmap(requested_addr, bytes);
4167   if (addr == requested_addr) {
4168     return requested_addr;
4169   }
4170 
4171   if (addr != NULL) {
4172     // mmap() succeeded, but not at the requested address
4173     anon_munmap(addr, bytes);
4174   }
4175 
4176   return NULL;
4177 }
4178 
4179 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
4180 void os::infinite_sleep() {
4181   while (true) {    // sleep forever ...
4182     ::sleep(100);   // ... 100 seconds at a time
4183   }
4184 }
4185 
4186 // Used to convert frequent JVM_Yield() to nops
4187 bool os::dont_yield() {
4188   return DontYieldALot;
4189 }
4190 
4191 // Linux CFS scheduler (since 2.6.23) does not guarantee sched_yield(2) will
4192 // actually give up the CPU. Since skip buddy (v2.6.28):
4193 //
4194 // * Sets the yielding task as skip buddy for current CPU's run queue.
4195 // * Picks next from run queue, if empty, picks a skip buddy (can be the yielding task).
4196 // * Clears skip buddies for this run queue (yielding task no longer a skip buddy).
4197 //
4198 // An alternative is calling os::naked_short_nanosleep with a small number to avoid
4199 // getting re-scheduled immediately.
4200 //
4201 void os::naked_yield() {
4202   sched_yield();
4203 }
4204 
4205 ////////////////////////////////////////////////////////////////////////////////
4206 // thread priority support
4207 
4208 // Note: Normal Linux applications are run with SCHED_OTHER policy. SCHED_OTHER
4209 // only supports dynamic priority, static priority must be zero. For real-time
4210 // applications, Linux supports SCHED_RR which allows static priority (1-99).
4211 // However, for large multi-threaded applications, SCHED_RR is not only slower
4212 // than SCHED_OTHER, but also very unstable (my volano tests hung hard in 4 out
4213 // of 5 runs - Sep 2005).
4214 //
4215 // The following code actually changes the niceness of kernel-thread/LWP. It
4216 // has an assumption that setpriority() only modifies one kernel-thread/LWP,
4217 // not the entire user process, and user level threads are 1:1 mapped to kernel
4218 // threads. It has always been the case, but could change in the future. For
4219 // this reason, the code should not be used as default (ThreadPriorityPolicy=0).
4220 // It is only used when ThreadPriorityPolicy=1 and may require system level permission
4221 // (e.g., root privilege or CAP_SYS_NICE capability).
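     //
     // As a minimal sketch (illustrative only, values taken from the table
     // below): with ThreadPriorityPolicy=1 a Java thread at MaxPriority maps to
     // niceness -5, which is applied to just that kernel thread roughly as
     //
     //   setpriority(PRIO_PROCESS, tid, -5);  // tid = Linux thread id (LWP)
     //
     // which is what os::set_native_priority() below does with the mapped value.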
4222 
4223 int os::java_to_os_priority[CriticalPriority + 1] = {
4224   19,              // 0 Entry should never be used
4225 
4226    4,              // 1 MinPriority
4227    3,              // 2
4228    2,              // 3
4229 
4230    1,              // 4
4231    0,              // 5 NormPriority
4232   -1,              // 6
4233 
4234   -2,              // 7
4235   -3,              // 8
4236   -4,              // 9 NearMaxPriority
4237 
4238   -5,              // 10 MaxPriority
4239 
4240   -5               // 11 CriticalPriority
4241 };
4242 
4243 static int prio_init() {
4244   if (ThreadPriorityPolicy == 1) {
4245     if (geteuid() != 0) {
4246       if (!FLAG_IS_DEFAULT(ThreadPriorityPolicy) && !FLAG_IS_JIMAGE_RESOURCE(ThreadPriorityPolicy)) {
4247         warning("-XX:ThreadPriorityPolicy=1 may require system level permission, " \
4248                 "e.g., being the root user. If the necessary permission is not " \
4249                 "possessed, changes to priority will be silently ignored.");
4250       }
4251     }
4252   }
4253   if (UseCriticalJavaThreadPriority) {
4254     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
4255   }
4256   return 0;
4257 }
4258 
4259 OSReturn os::set_native_priority(Thread* thread, int newpri) {
4260   if (!UseThreadPriorities || ThreadPriorityPolicy == 0) return OS_OK;
4261 
4262   int ret = setpriority(PRIO_PROCESS, thread->osthread()->thread_id(), newpri);
4263   return (ret == 0) ? OS_OK : OS_ERR;
4264 }
4265 
4266 OSReturn os::get_native_priority(const Thread* const thread,
4267                                  int *priority_ptr) {
4268   if (!UseThreadPriorities || ThreadPriorityPolicy == 0) {
4269     *priority_ptr = java_to_os_priority[NormPriority];
4270     return OS_OK;
4271   }
4272 
4273   errno = 0;
4274   *priority_ptr = getpriority(PRIO_PROCESS, thread->osthread()->thread_id());
4275   return (*priority_ptr != -1 || errno == 0 ? OS_OK : OS_ERR);
4276 }
4277 
4278 // This is the fastest way to get thread cpu time on Linux.
4279 // Returns cpu time (user+sys) for any thread, not only for current.
4280 // POSIX-compliant clocks are implemented in kernels 2.6.16+.
4281 // It might work on 2.6.10+ with a special kernel/glibc patch.
4282 // For reference, please see IEEE Std 1003.1-2004:
4283 //   http://www.unix.org/single_unix_specification
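     //
     // A minimal standalone sketch of the same pattern (illustrative only; for
     // a thread other than the current one the clockid is obtained via
     // pthread_getcpuclockid, as fast_cpu_time() does further below):
     //
     //   clockid_t cid;
     //   if (pthread_getcpuclockid(pthread_self(), &cid) == 0) {
     //     struct timespec tp;
     //     clock_gettime(cid, &tp);  // user+sys CPU time of this thread
     //   }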
4284 
4285 jlong os::Linux::fast_thread_cpu_time(clockid_t clockid) {
4286   struct timespec tp;
4287   int status = clock_gettime(clockid, &tp);
4288   assert(status == 0, "clock_gettime error: %s", os::strerror(errno));
4289   return (tp.tv_sec * NANOSECS_PER_SEC) + tp.tv_nsec;
4290 }
4291 
4292 // Determine if the vmid is the parent pid for a child in a PID namespace.
4293 // Return the namespace pid if so, otherwise -1.
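     // On kernels that support it, such a status file typically contains a line
     // like
     //   NSpid:  12345   1
     // where the first value is the pid as seen by the caller and the last
     // value is the pid inside the innermost namespace.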
4294 int os::Linux::get_namespace_pid(int vmid) {
4295   char fname[24];
4296   int retpid = -1;
4297 
4298   snprintf(fname, sizeof(fname), "/proc/%d/status", vmid);
4299   FILE *fp = os::fopen(fname, "r");
4300 
4301   if (fp) {
4302     int pid, nspid;
4303     int ret;
4304     while (!feof(fp) && !ferror(fp)) {
4305       ret = fscanf(fp, "NSpid: %d %d", &pid, &nspid);
4306       if (ret == 1) {
4307         break;
4308       }
4309       if (ret == 2) {
4310         retpid = nspid;
4311         break;
4312       }
4313       for (;;) {
4314         int ch = fgetc(fp);
4315         if (ch == EOF || ch == (int)'\n') break;
4316       }
4317     }
4318     fclose(fp);
4319   }
4320   return retpid;
4321 }
4322 
4323 extern void report_error(char* file_name, int line_no, char* title,
4324                          char* format, ...);
4325 
4326 // Some Linux distributions (notably Alpine Linux) include grsecurity in
4327 // the kernel. Of particular interest from a JVM perspective
4328 // is PaX (https://pax.grsecurity.net/), which adds some security features
4329 // related to page attributes. Specifically, the MPROTECT PaX functionality
4330 // (https://pax.grsecurity.net/docs/mprotect.txt) prevents dynamic
4331 // code generation by disallowing a (previously) writable page to be
4332 // marked as executable. This is, of course, exactly what HotSpot does
4333 // for both JIT compiled methods, as well as for stubs, adapters, etc.
4334 //
4335 // Instead of crashing "lazily" when trying to make a page executable,
4336 // this code probes for the presence of PaX and reports the failure
4337 // eagerly.
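     // (On some PaX-enabled kernels the active flags are also visible in
     // /proc/self/status, e.g. a line such as "PaX: PeMRs"; the probe below
     // does not rely on that and tests the behavior directly.)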
4338 static void check_pax(void) {
4339   // Zero doesn't generate code dynamically, so no need to perform the PaX check
4340 #ifndef ZERO
4341   size_t size = os::Linux::page_size();
4342 
4343   void* p = ::mmap(NULL, size, PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
4344   if (p == MAP_FAILED) {
4345     log_debug(os)("os_linux.cpp: check_pax: mmap failed (%s)" , os::strerror(errno));
4346     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "failed to allocate memory for PaX check.");
4347   }
4348 
4349   int res = ::mprotect(p, size, PROT_WRITE|PROT_EXEC);
4350   if (res == -1) {
4351     log_debug(os)("os_linux.cpp: check_pax: mprotect failed (%s)" , os::strerror(errno));
4352     vm_exit_during_initialization(
4353       "Failed to mark memory page as executable - check if grsecurity/PaX is enabled");
4354   }
4355 
4356   ::munmap(p, size);
4357 #endif
4358 }
4359 
4360 // this is called _before_ most of the global arguments have been parsed
4361 void os::init(void) {
4362   char dummy;   // used to get a guess on initial stack address
4363 
4364   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
4365 
4366   Linux::set_page_size(sysconf(_SC_PAGESIZE));
4367   if (Linux::page_size() == -1) {
4368     fatal("os_linux.cpp: os::init: sysconf failed (%s)",
4369           os::strerror(errno));
4370   }
4371   _page_sizes.add(Linux::page_size());
4372 
4373   Linux::initialize_system_info();
4374 
4375 #ifdef __GLIBC__
4376   Linux::_mallinfo = CAST_TO_FN_PTR(Linux::mallinfo_func_t, dlsym(RTLD_DEFAULT, "mallinfo"));
4377   Linux::_mallinfo2 = CAST_TO_FN_PTR(Linux::mallinfo2_func_t, dlsym(RTLD_DEFAULT, "mallinfo2"));
4378 #endif // __GLIBC__
4379 
4380   os::Linux::CPUPerfTicks pticks;
4381   bool res = os::Linux::get_tick_information(&pticks, -1);
4382 
4383   if (res && pticks.has_steal_ticks) {
4384     has_initial_tick_info = true;
4385     initial_total_ticks = pticks.total;
4386     initial_steal_ticks = pticks.steal;
4387   }
4388 
4389   // _main_thread points to the thread that created/loaded the JVM.
4390   Linux::_main_thread = pthread_self();
4391 
4392   // retrieve entry point for pthread_setname_np
4393   Linux::_pthread_setname_np =
4394     (int(*)(pthread_t, const char*))dlsym(RTLD_DEFAULT, "pthread_setname_np");
4395 
4396   check_pax();
4397 
4398   os::Posix::init();
4399 }
4400 
4401 // To install functions via the atexit library call
4402 extern "C" {
4403   static void perfMemory_exit_helper() {
4404     perfMemory_exit();
4405   }
4406 }
4407 
4408 void os::pd_init_container_support() {
4409   OSContainer::init();
4410 }
4411 
4412 void os::Linux::numa_init() {
4413 
4414   // Java can be invoked as
4415   // 1. Without numactl and heap will be allocated/configured on all nodes as
4416   //    per the system policy.
4417   // 2. With numactl --interleave:
4418   //      Use the numa_get_interleave_mask(v2) API to get the node bitmask; the
4419   //      same API returns an empty (reset) bitmask in the membind case.
4420   //      Interleave is only a hint and the kernel can fall back to other nodes
4421   //      if no memory is available on the target nodes.
4422   // 3. With numactl --membind:
4423   //      Use the numa_get_membind(v2) API to get the node bitmask; the same
4424   //      API returns a bitmask of all nodes in the interleave case.
4425   // numa_all_nodes_ptr holds bitmask of all nodes.
4426   // numa_get_interleave_mask(v2) and numa_get_membind(v2) return the correct
4427   //   bitmask when externally configured to run on all or fewer nodes.
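       //
       // For illustration, the three cases above correspond to invocations such
       // as (node lists are examples only):
       //   java ...                             (1)
       //   numactl --interleave=all java ...    (2)
       //   numactl --membind=0,1 java ...       (3)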
4428 
4429   if (!Linux::libnuma_init()) {
4430     FLAG_SET_ERGO(UseNUMA, false);
4431     FLAG_SET_ERGO(UseNUMAInterleaving, false); // Also depends on libnuma.
4432   } else {
4433     if ((Linux::numa_max_node() < 1) || Linux::is_bound_to_single_node()) {
4434       // If there's only one node (they start from 0) or if the process
4435       // is bound explicitly to a single node using membind, disable NUMA
4436       UseNUMA = false;
4437     } else {
4438       LogTarget(Info,os) log;
4439       LogStream ls(log);
4440 
4441       Linux::set_configured_numa_policy(Linux::identify_numa_policy());
4442 
4443       struct bitmask* bmp = Linux::_numa_membind_bitmask;
4444       const char* numa_mode = "membind";
4445 
4446       if (Linux::is_running_in_interleave_mode()) {
4447         bmp = Linux::_numa_interleave_bitmask;
4448         numa_mode = "interleave";
4449       }
4450 
4451       ls.print("UseNUMA is enabled and invoked in '%s' mode."
4452                " Heap will be configured using NUMA memory nodes:", numa_mode);
4453 
4454       for (int node = 0; node <= Linux::numa_max_node(); node++) {
4455         if (Linux::_numa_bitmask_isbitset(bmp, node)) {
4456           ls.print(" %d", node);
4457         }
4458       }
4459     }
4460   }
4461 
4462   // When NUMA requested, not-NUMA-aware allocations default to interleaving.
4463   if (UseNUMA && !UseNUMAInterleaving) {
4464     FLAG_SET_ERGO_IF_DEFAULT(UseNUMAInterleaving, true);
4465   }
4466 
4467   if (UseParallelGC && UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
4468     // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
4469     // we can make the adaptive lgrp chunk resizing work. If the user specified both
4470     // UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn
4471     // and disable adaptive resizing.
4472     if (UseAdaptiveSizePolicy || UseAdaptiveNUMAChunkSizing) {
4473       warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, "
4474               "disabling adaptive resizing (-XX:-UseAdaptiveSizePolicy -XX:-UseAdaptiveNUMAChunkSizing)");
4475       UseAdaptiveSizePolicy = false;
4476       UseAdaptiveNUMAChunkSizing = false;
4477     }
4478   }
4479 }
4480 
4481 // this is called _after_ the global arguments have been parsed
4482 jint os::init_2(void) {
4483 
4484   // This could be set after os::Posix::init() but all platforms
4485   // have to set it the same so we have to mirror Solaris.
4486   DEBUG_ONLY(os::set_mutex_init_done();)
4487 
4488   os::Posix::init_2();
4489 
4490   Linux::fast_thread_clock_init();
4491 
4492   if (PosixSignals::init() == JNI_ERR) {
4493     return JNI_ERR;
4494   }
4495 
4496   if (AdjustStackSizeForTLS) {
4497     get_minstack_init();
4498   }
4499 
4500   // Check and set minimum stack sizes against command line options
4501   if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
4502     return JNI_ERR;
4503   }
4504 
4505 #if defined(IA32) && !defined(ZERO)
4506   // Need to ensure we've determined the process's initial stack to
4507   // perform the workaround
4508   Linux::capture_initial_stack(JavaThread::stack_size_at_create());
4509   workaround_expand_exec_shield_cs_limit();
4510 #else
4511   suppress_primordial_thread_resolution = Arguments::created_by_java_launcher();
4512   if (!suppress_primordial_thread_resolution) {
4513     Linux::capture_initial_stack(JavaThread::stack_size_at_create());
4514   }
4515 #endif
4516 
4517   Linux::libpthread_init();
4518   Linux::sched_getcpu_init();
4519   log_info(os)("HotSpot is running with %s, %s",
4520                Linux::libc_version(), Linux::libpthread_version());
4521 
4522   if (UseNUMA || UseNUMAInterleaving) {
4523     Linux::numa_init();
4524   }
4525 
4526   if (MaxFDLimit) {
4527     // set the number of file descriptors to max. print out error
4528     // if getrlimit/setrlimit fails but continue regardless.
4529     struct rlimit nbr_files;
4530     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
4531     if (status != 0) {
4532       log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
4533     } else {
4534       nbr_files.rlim_cur = nbr_files.rlim_max;
4535       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
4536       if (status != 0) {
4537         log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
4538       }
4539     }
4540   }
4541 
4542   // at-exit methods are called in the reverse order of their registration.
4543   // atexit functions are called on return from main or as a result of a
4544   // call to exit(3C). There can be only 32 of these functions registered
4545   // and atexit() does not set errno.
4546 
4547   if (PerfAllowAtExitRegistration) {
4548     // only register atexit functions if PerfAllowAtExitRegistration is set.
4549     // atexit functions can be delayed until process exit time, which
4550     // can be problematic for embedded VM situations. Embedded VMs should
4551     // call DestroyJavaVM() to assure that VM resources are released.
4552 
4553     // note: perfMemory_exit_helper atexit function may be removed in
4554     // the future if the appropriate cleanup code can be added to the
4555     // VM_Exit VMOperation's doit method.
4556     if (atexit(perfMemory_exit_helper) != 0) {
4557       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4558     }
4559   }
4560 
4561   // initialize thread priority policy
4562   prio_init();
4563 
4564   if (!FLAG_IS_DEFAULT(AllocateHeapAt)) {
4565     set_coredump_filter(DAX_SHARED_BIT);
4566   }
4567 
4568   if (DumpPrivateMappingsInCore) {
4569     set_coredump_filter(FILE_BACKED_PVT_BIT);
4570   }
4571 
4572   if (DumpSharedMappingsInCore) {
4573     set_coredump_filter(FILE_BACKED_SHARED_BIT);
4574   }
4575 
4576   if (DumpPerfMapAtExit && FLAG_IS_DEFAULT(UseCodeCacheFlushing)) {
4577     // Disable code cache flushing to ensure the map file written at
4578     // exit contains all nmethods generated during execution.
4579     FLAG_SET_DEFAULT(UseCodeCacheFlushing, false);
4580   }
4581 
4582   return JNI_OK;
4583 }
4584 
4585 // older glibc versions don't have this macro (which expands to
4586 // an optimized bit-counting function) so we have to roll our own
4587 #ifndef CPU_COUNT
4588 
4589 static int _cpu_count(const cpu_set_t* cpus) {
4590   int count = 0;
4591   // only look up to the number of configured processors
4592   for (int i = 0; i < os::processor_count(); i++) {
4593     if (CPU_ISSET(i, cpus)) {
4594       count++;
4595     }
4596   }
4597   return count;
4598 }
4599 
4600 #define CPU_COUNT(cpus) _cpu_count(cpus)
4601 
4602 #endif // CPU_COUNT
4603 
4604 // Get the current number of available processors for this process.
4605 // This value can change at any time during a process's lifetime.
4606 // sched_getaffinity gives an accurate answer as it accounts for cpusets.
4607 // If it appears there may be more than 1024 processors then we do a
4608 // dynamic check - see 6515172 for details.
4609 // If anything goes wrong we fall back to returning the number of online
4610 // processors - which can be greater than the number available to the process.
4611 int os::Linux::active_processor_count() {
4612   cpu_set_t cpus;  // can represent at most 1024 (CPU_SETSIZE) processors
4613   cpu_set_t* cpus_p = &cpus;
4614   int cpus_size = sizeof(cpu_set_t);
4615 
4616   int configured_cpus = os::processor_count();  // upper bound on available cpus
4617   int cpu_count = 0;
4618 
4619 // old build platforms may not support dynamic cpu sets
4620 #ifdef CPU_ALLOC
4621 
4622   // To enable easy testing of the dynamic path on different platforms we
4623   // introduce a diagnostic flag: UseCpuAllocPath
4624   if (configured_cpus >= CPU_SETSIZE || UseCpuAllocPath) {
4625     // kernel may use a mask bigger than cpu_set_t
4626     log_trace(os)("active_processor_count: using dynamic path %s"
4627                   "- configured processors: %d",
4628                   UseCpuAllocPath ? "(forced) " : "",
4629                   configured_cpus);
4630     cpus_p = CPU_ALLOC(configured_cpus);
4631     if (cpus_p != NULL) {
4632       cpus_size = CPU_ALLOC_SIZE(configured_cpus);
4633       // zero it just to be safe
4634       CPU_ZERO_S(cpus_size, cpus_p);
4635     }
4636     else {
4637        // failed to allocate so fall back to online cpus
4638        int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
4639        log_trace(os)("active_processor_count: "
4640                      "CPU_ALLOC failed (%s) - using "
4641                      "online processor count: %d",
4642                      os::strerror(errno), online_cpus);
4643        return online_cpus;
4644     }
4645   }
4646   else {
4647     log_trace(os)("active_processor_count: using static path - configured processors: %d",
4648                   configured_cpus);
4649   }
4650 #else // CPU_ALLOC
4651 // these stubs won't be executed
4652 #define CPU_COUNT_S(size, cpus) -1
4653 #define CPU_FREE(cpus)
4654 
4655   log_trace(os)("active_processor_count: only static path available - configured processors: %d",
4656                 configured_cpus);
4657 #endif // CPU_ALLOC
4658 
4659   // pid 0 means the current thread - which we have to assume represents the process
4660   if (sched_getaffinity(0, cpus_size, cpus_p) == 0) {
4661     if (cpus_p != &cpus) { // can only be true when CPU_ALLOC used
4662       cpu_count = CPU_COUNT_S(cpus_size, cpus_p);
4663     }
4664     else {
4665       cpu_count = CPU_COUNT(cpus_p);
4666     }
4667     log_trace(os)("active_processor_count: sched_getaffinity processor count: %d", cpu_count);
4668   }
4669   else {
4670     cpu_count = ::sysconf(_SC_NPROCESSORS_ONLN);
4671     warning("sched_getaffinity failed (%s) - using online processor count (%d) "
4672             "which may exceed available processors", os::strerror(errno), cpu_count);
4673   }
4674 
4675   if (cpus_p != &cpus) { // can only be true when CPU_ALLOC used
4676     CPU_FREE(cpus_p);
4677   }
4678 
4679   assert(cpu_count > 0 && cpu_count <= os::processor_count(), "sanity check");
4680   return cpu_count;
4681 }
4682 
4683 // Determine the active processor count from one of
4684 // three different sources:
4685 //
4686 // 1. User option -XX:ActiveProcessorCount
4687 // 2. kernel os calls (sched_getaffinity or sysconf(_SC_NPROCESSORS_ONLN))
4688 // 3. extracted from cgroup cpu subsystem (shares and quotas)
4689 //
4690 // Option 1, if specified, will always override.
4691 // If the cgroup subsystem is active and configured, we
4692 // will return the min of the cgroup and option 2 results.
4693 // This is required since tools, such as numactl, that
4694 // alter cpu affinity do not update cgroup subsystem
4695 // cpuset configuration files.
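     // For example, 'java -XX:ActiveProcessorCount=4 ...' makes the VM report
     // 4 processors regardless of cgroup limits or CPU affinity.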
4696 int os::active_processor_count() {
4697   // User has overridden the number of active processors
4698   if (ActiveProcessorCount > 0) {
4699     log_trace(os)("active_processor_count: "
4700                   "active processor count set by user : %d",
4701                   ActiveProcessorCount);
4702     return ActiveProcessorCount;
4703   }
4704 
4705   int active_cpus;
4706   if (OSContainer::is_containerized()) {
4707     active_cpus = OSContainer::active_processor_count();
4708     log_trace(os)("active_processor_count: determined by OSContainer: %d",
4709                    active_cpus);
4710   } else {
4711     active_cpus = os::Linux::active_processor_count();
4712   }
4713 
4714   return active_cpus;
4715 }
4716 
4717 static bool should_warn_invalid_processor_id() {
4718   if (os::processor_count() == 1) {
4719     // Don't warn if we only have one processor
4720     return false;
4721   }
4722 
4723   static volatile int warn_once = 1;
4724 
4725   if (Atomic::load(&warn_once) == 0 ||
4726       Atomic::xchg(&warn_once, 0) == 0) {
4727     // Don't warn more than once
4728     return false;
4729   }
4730 
4731   return true;
4732 }
4733 
4734 uint os::processor_id() {
4735   const int id = Linux::sched_getcpu();
4736 
4737   if (id < processor_count()) {
4738     return (uint)id;
4739   }
4740 
4741   // Some environments (e.g. openvz containers and the rr debugger) incorrectly
4742   // report a processor id that is higher than the number of processors available.
4743   // This is problematic, for example, when implementing CPU-local data structures,
4744   // where the processor id is used to index into an array of length processor_count().
4745   // If this happens we return 0 here. This is safe since we always have at least
4746   // one processor, but it's not optimal for performance if we're actually executing
4747   // in an environment with more than one processor.
4748   if (should_warn_invalid_processor_id()) {
4749     log_warning(os)("Invalid processor id reported by the operating system "
4750                     "(got processor id %d, valid processor id range is 0-%d)",
4751                     id, processor_count() - 1);
4752     log_warning(os)("Falling back to assuming processor id is 0. "
4753                     "This could have a negative impact on performance.");
4754   }
4755 
4756   return 0;
4757 }
4758 
4759 void os::set_native_thread_name(const char *name) {
4760   if (Linux::_pthread_setname_np) {
4761     char buf[16]; // according to the glibc manpage, 16 chars incl. '\0'
4762     snprintf(buf, sizeof(buf), "%s", name);
4763     buf[sizeof(buf) - 1] = '\0';
4764     const int rc = Linux::_pthread_setname_np(pthread_self(), buf);
4765     // ERANGE should not happen; all other errors should just be ignored.
4766     assert(rc != ERANGE, "pthread_setname_np failed");
4767   }
4768 }
4769 
4770 ////////////////////////////////////////////////////////////////////////////////
4771 // debug support
4772 
4773 bool os::find(address addr, outputStream* st) {
4774   Dl_info dlinfo;
4775   memset(&dlinfo, 0, sizeof(dlinfo));
4776   if (dladdr(addr, &dlinfo) != 0) {
4777     st->print(PTR_FORMAT ": ", p2i(addr));
4778     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
4779       st->print("%s+" PTR_FORMAT, dlinfo.dli_sname,
4780                 p2i(addr) - p2i(dlinfo.dli_saddr));
4781     } else if (dlinfo.dli_fbase != NULL) {
4782       st->print("<offset " PTR_FORMAT ">", p2i(addr) - p2i(dlinfo.dli_fbase));
4783     } else {
4784       st->print("<absolute address>");
4785     }
4786     if (dlinfo.dli_fname != NULL) {
4787       st->print(" in %s", dlinfo.dli_fname);
4788     }
4789     if (dlinfo.dli_fbase != NULL) {
4790       st->print(" at " PTR_FORMAT, p2i(dlinfo.dli_fbase));
4791     }
4792     st->cr();
4793 
4794     if (Verbose) {
4795       // decode some bytes around the PC
4796       address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
4797       address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
4798       address       lowest = (address) dlinfo.dli_sname;
4799       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
4800       if (begin < lowest)  begin = lowest;
4801       Dl_info dlinfo2;
4802       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
4803           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) {
4804         end = (address) dlinfo2.dli_saddr;
4805       }
4806       Disassembler::decode(begin, end, st);
4807     }
4808     return true;
4809   }
4810   return false;
4811 }
4812 
4813 ////////////////////////////////////////////////////////////////////////////////
4814 // misc
4815 
4816 // This does not do anything on Linux. This is basically a hook for being
4817 // able to use structured exception handling (thread-local exception filters)
4818 // on, e.g., Win32.
4819 void
4820 os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
4821                          JavaCallArguments* args, JavaThread* thread) {
4822   f(value, method, args, thread);
4823 }
4824 
4825 void os::print_statistics() {
4826 }
4827 
4828 bool os::message_box(const char* title, const char* message) {
4829   int i;
4830   fdStream err(defaultStream::error_fd());
4831   for (i = 0; i < 78; i++) err.print_raw("=");
4832   err.cr();
4833   err.print_raw_cr(title);
4834   for (i = 0; i < 78; i++) err.print_raw("-");
4835   err.cr();
4836   err.print_raw_cr(message);
4837   for (i = 0; i < 78; i++) err.print_raw("=");
4838   err.cr();
4839 
4840   char buf[16];
4841   // Prevent process from exiting upon "read error" without consuming all CPU
4842   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
4843 
4844   return buf[0] == 'y' || buf[0] == 'Y';
4845 }
4846 
4847 // Is a (classpath) directory empty?
4848 bool os::dir_is_empty(const char* path) {
4849   DIR *dir = NULL;
4850   struct dirent *ptr;
4851 
4852   dir = opendir(path);
4853   if (dir == NULL) return true;
4854 
4855   // Scan the directory
4856   bool result = true;
4857   while (result && (ptr = readdir(dir)) != NULL) {
4858     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
4859       result = false;
4860     }
4861   }
4862   closedir(dir);
4863   return result;
4864 }
4865 
4866 // This code originates from JDK's sysOpen and open64_w
4867 // from src/solaris/hpi/src/system_md.c
4868 
4869 int os::open(const char *path, int oflag, int mode) {
4870   if (strlen(path) > MAX_PATH - 1) {
4871     errno = ENAMETOOLONG;
4872     return -1;
4873   }
4874 
4875   // All file descriptors that are opened in the Java process and not
4876   // specifically destined for a subprocess should have the close-on-exec
4877   // flag set.  If we don't set it, then careless 3rd party native code
4878   // might fork and exec without closing all appropriate file descriptors
4879   // (e.g. as we do in closeDescriptors in UNIXProcess.c), and this in
4880   // turn might:
4881   //
4882   // - cause end-of-file to fail to be detected on some file
4883   //   descriptors, resulting in mysterious hangs, or
4884   //
4885   // - cause an fopen in the subprocess to fail on a system
4886   //   suffering from bug 1085341.
4887   //
4888   // (Yes, the default setting of the close-on-exec flag is a Unix
4889   // design flaw)
4890   //
4891   // See:
4892   // 1085341: 32-bit stdio routines should support file descriptors >255
4893   // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4894   // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4895   //
4896   // Modern Linux kernels (after 2.6.23, 2007) support O_CLOEXEC with open().
4897   // O_CLOEXEC is preferable to using FD_CLOEXEC on an open file descriptor
4898   // because it saves a system call and removes a small window where the flag
4899   // is unset.  On ancient Linux kernels the O_CLOEXEC flag will be ignored
4900   // and we fall back to using FD_CLOEXEC (see below).
4901 #ifdef O_CLOEXEC
4902   oflag |= O_CLOEXEC;
4903 #endif
4904 
4905   int fd = ::open64(path, oflag, mode);
4906   if (fd == -1) return -1;
4907 
4908   // If the open succeeded, the file might still be a directory
4909   {
4910     struct stat64 buf64;
4911     int ret = ::fstat64(fd, &buf64);
4912     int st_mode = buf64.st_mode;
4913 
4914     if (ret != -1) {
4915       if ((st_mode & S_IFMT) == S_IFDIR) {
4916         errno = EISDIR;
4917         ::close(fd);
4918         return -1;
4919       }
4920     } else {
4921       ::close(fd);
4922       return -1;
4923     }
4924   }
4925 
4926 #ifdef FD_CLOEXEC
4927   // Validate that the use of the O_CLOEXEC flag on open above worked.
4928   // With recent kernels, we will perform this check exactly once.
4929   static sig_atomic_t O_CLOEXEC_is_known_to_work = 0;
4930   if (!O_CLOEXEC_is_known_to_work) {
4931     int flags = ::fcntl(fd, F_GETFD);
4932     if (flags != -1) {
4933       if ((flags & FD_CLOEXEC) != 0)
4934         O_CLOEXEC_is_known_to_work = 1;
4935       else
4936         ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4937     }
4938   }
4939 #endif
4940 
4941   return fd;
4942 }
4943 
4944 
4945 // create binary file, rewriting existing file if required
4946 int os::create_binary_file(const char* path, bool rewrite_existing) {
4947   int oflags = O_WRONLY | O_CREAT;
4948   oflags |= rewrite_existing ? O_TRUNC : O_EXCL;
4949   return ::open64(path, oflags, S_IREAD | S_IWRITE);
4950 }
4951 
4952 // return current position of file pointer
4953 jlong os::current_file_offset(int fd) {
4954   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4955 }
4956 
4957 // move file pointer to the specified offset
4958 jlong os::seek_to_file_offset(int fd, jlong offset) {
4959   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4960 }
4961 
4962 // This code originates from JDK's sysAvailable
4963 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c
4964 
4965 int os::available(int fd, jlong *bytes) {
4966   jlong cur, end;
4967   int mode;
4968   struct stat64 buf64;
4969 
4970   if (::fstat64(fd, &buf64) >= 0) {
4971     mode = buf64.st_mode;
4972     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
4973       int n;
4974       if (::ioctl(fd, FIONREAD, &n) >= 0) {
4975         *bytes = n;
4976         return 1;
4977       }
4978     }
4979   }
4980   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4981     return 0;
4982   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4983     return 0;
4984   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4985     return 0;
4986   }
4987   *bytes = end - cur;
4988   return 1;
4989 }
4990 
4991 // Map a block of memory.
4992 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4993                         char *addr, size_t bytes, bool read_only,
4994                         bool allow_exec) {
4995   int prot;
4996   int flags = MAP_PRIVATE;
4997 
4998   if (read_only) {
4999     prot = PROT_READ;
5000   } else {
5001     prot = PROT_READ | PROT_WRITE;
5002   }
5003 
5004   if (allow_exec) {
5005     prot |= PROT_EXEC;
5006   }
5007 
5008   if (addr != NULL) {
5009     flags |= MAP_FIXED;
5010   }
5011 
5012   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
5013                                      fd, file_offset);
5014   if (mapped_address == MAP_FAILED) {
5015     return NULL;
5016   }
5017   return mapped_address;
5018 }
5019 
5020 
5021 // Remap a block of memory.
5022 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5023                           char *addr, size_t bytes, bool read_only,
5024                           bool allow_exec) {
5025   // same as map_memory() on this OS
5026   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5027                         allow_exec);
5028 }
5029 
5030 
5031 // Unmap a block of memory.
5032 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5033   return munmap(addr, bytes) == 0;
5034 }
5035 
5036 static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time);
5037 
5038 static jlong fast_cpu_time(Thread *thread) {
5039     clockid_t clockid;
5040     int rc = os::Linux::pthread_getcpuclockid(thread->osthread()->pthread_id(),
5041                                               &clockid);
5042     if (rc == 0) {
5043       return os::Linux::fast_thread_cpu_time(clockid);
5044     } else {
5045       // It's possible to encounter a terminated native thread that failed
5046       // to detach itself from the VM - which should result in ESRCH.
5047       assert_status(rc == ESRCH, rc, "pthread_getcpuclockid failed");
5048       return -1;
5049     }
5050 }
5051 
5052 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5053 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5054 // of a thread.
5055 //
5056 // current_thread_cpu_time() and thread_cpu_time(Thread*) return
5057 // the fast estimate available on the platform.
5058 
5059 jlong os::current_thread_cpu_time() {
5060   if (os::Linux::supports_fast_thread_cpu_time()) {
5061     return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
5062   } else {
5063     // return user + sys since the cost is the same
5064     return slow_thread_cpu_time(Thread::current(), true /* user + sys */);
5065   }
5066 }
5067 
5068 jlong os::thread_cpu_time(Thread* thread) {
5069   // consistent with what current_thread_cpu_time() returns
5070   if (os::Linux::supports_fast_thread_cpu_time()) {
5071     return fast_cpu_time(thread);
5072   } else {
5073     return slow_thread_cpu_time(thread, true /* user + sys */);
5074   }
5075 }
5076 
5077 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5078   if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) {
5079     return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
5080   } else {
5081     return slow_thread_cpu_time(Thread::current(), user_sys_cpu_time);
5082   }
5083 }
5084 
5085 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5086   if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) {
5087     return fast_cpu_time(thread);
5088   } else {
5089     return slow_thread_cpu_time(thread, user_sys_cpu_time);
5090   }
5091 }
5092 
5093 //  -1 on error.
5094 static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5095   pid_t  tid = thread->osthread()->thread_id();
5096   char *s;
5097   char stat[2048];
5098   int statlen;
5099   char proc_name[64];
5100   int count;
5101   long sys_time, user_time;
5102   char cdummy;
5103   int idummy;
5104   long ldummy;
5105   FILE *fp;
5106 
5107   snprintf(proc_name, 64, "/proc/self/task/%d/stat", tid);
5108   fp = os::fopen(proc_name, "r");
5109   if (fp == NULL) return -1;
5110   statlen = fread(stat, 1, 2047, fp);
5111   stat[statlen] = '\0';
5112   fclose(fp);
5113 
5114   // Skip pid and the command string. Note that we could be dealing with
5115   // weird command names, e.g. the user could decide to rename the java launcher
5116   // to "java 1.4.2 :)", then the stat file would look like
5117   //                1234 (java 1.4.2 :)) R ... ...
5118   // We don't really need to know the command string, just find the last
5119   // occurrence of ")" and then start parsing from there. See bug 4726580.
5120   s = strrchr(stat, ')');
5121   if (s == NULL) return -1;
5122 
5123   // Skip blank chars
5124   do { s++; } while (s && isspace(*s));
5125 
5126   count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
5127                  &cdummy, &idummy, &idummy, &idummy, &idummy, &idummy,
5128                  &ldummy, &ldummy, &ldummy, &ldummy, &ldummy,
5129                  &user_time, &sys_time);
5130   if (count != 13) return -1;
5131   if (user_sys_cpu_time) {
5132     return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec);
5133   } else {
5134     return (jlong)user_time * (1000000000 / clock_tics_per_sec);
5135   }
5136 }
5137 
5138 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5139   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
5140   info_ptr->may_skip_backward = false;     // elapsed time not wall time
5141   info_ptr->may_skip_forward = false;      // elapsed time not wall time
5142   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
5143 }
5144 
5145 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5146   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
5147   info_ptr->may_skip_backward = false;     // elapsed time not wall time
5148   info_ptr->may_skip_forward = false;      // elapsed time not wall time
5149   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
5150 }
5151 
5152 bool os::is_thread_cpu_time_supported() {
5153   return true;
5154 }
5155 
5156 // System loadavg support.  Returns -1 if load average cannot be obtained.
5157 // Linux doesn't yet have an (official) notion of processor sets,
5158 // so just return the system wide load average.
5159 int os::loadavg(double loadavg[], int nelem) {
5160   return ::getloadavg(loadavg, nelem);
5161 }
5162 
5163 void os::pause() {
5164   char filename[MAX_PATH];
5165   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5166     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5167   } else {
5168     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5169   }
5170 
5171   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5172   if (fd != -1) {
5173     struct stat buf;
5174     ::close(fd);
5175     while (::stat(filename, &buf) == 0) {
5176       (void)::poll(NULL, 0, 100);
5177     }
5178   } else {
5179     jio_fprintf(stderr,
5180                 "Could not open pause file '%s', continuing immediately.\n", filename);
5181   }
5182 }
5183 
5184 // Get the default path to the core file
5185 // Returns the length of the string
5186 int os::get_core_path(char* buffer, size_t bufferSize) {
5187   /*
5188    * Max length of /proc/sys/kernel/core_pattern is 128 characters.
5189    * See https://www.kernel.org/doc/Documentation/sysctl/kernel.txt
5190    */
5191   const int core_pattern_len = 129;
5192   char core_pattern[core_pattern_len] = {0};
5193 
5194   int core_pattern_file = ::open("/proc/sys/kernel/core_pattern", O_RDONLY);
5195   if (core_pattern_file == -1) {
5196     return -1;
5197   }
5198 
5199   ssize_t ret = ::read(core_pattern_file, core_pattern, core_pattern_len);
5200   ::close(core_pattern_file);
5201   if (ret <= 0 || ret >= core_pattern_len || core_pattern[0] == '\n') {
5202     return -1;
5203   }
5204   if (core_pattern[ret-1] == '\n') {
5205     core_pattern[ret-1] = '\0';
5206   } else {
5207     core_pattern[ret] = '\0';
5208   }
5209 
5210   // Replace the %p in the core pattern with the process id. NOTE: we do this
5211   // only if the pattern doesn't start with "|", and we support only one %p in
5212   // the pattern.
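       // For example (illustrative values): a core_pattern of "core.%p" in a
       // working directory of "/work", with pid 1234, yields "/work/core.1234".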
5213   char *pid_pos = strstr(core_pattern, "%p");
5214   const char* tail = (pid_pos != NULL) ? (pid_pos + 2) : "";  // skip over the "%p"
5215   int written;
5216 
5217   if (core_pattern[0] == '/') {
5218     if (pid_pos != NULL) {
5219       *pid_pos = '\0';
5220       written = jio_snprintf(buffer, bufferSize, "%s%d%s", core_pattern,
5221                              current_process_id(), tail);
5222     } else {
5223       written = jio_snprintf(buffer, bufferSize, "%s", core_pattern);
5224     }
5225   } else {
5226     char cwd[PATH_MAX];
5227 
5228     const char* p = get_current_directory(cwd, PATH_MAX);
5229     if (p == NULL) {
5230       return -1;
5231     }
5232 
5233     if (core_pattern[0] == '|') {
5234       written = jio_snprintf(buffer, bufferSize,
5235                              "\"%s\" (or dumping to %s/core.%d)",
5236                              &core_pattern[1], p, current_process_id());
5237     } else if (pid_pos != NULL) {
5238       *pid_pos = '\0';
5239       written = jio_snprintf(buffer, bufferSize, "%s/%s%d%s", p, core_pattern,
5240                              current_process_id(), tail);
5241     } else {
5242       written = jio_snprintf(buffer, bufferSize, "%s/%s", p, core_pattern);
5243     }
5244   }
5245 
5246   if (written < 0) {
5247     return -1;
5248   }
5249 
5250   if (((size_t)written < bufferSize) && (pid_pos == NULL) && (core_pattern[0] != '|')) {
5251     int core_uses_pid_file = ::open("/proc/sys/kernel/core_uses_pid", O_RDONLY);
5252 
5253     if (core_uses_pid_file != -1) {
5254       char core_uses_pid = 0;
5255       ssize_t ret = ::read(core_uses_pid_file, &core_uses_pid, 1);
5256       ::close(core_uses_pid_file);
5257 
5258       if (core_uses_pid == '1') {
5259         jio_snprintf(buffer + written, bufferSize - written,
5260                                           ".%d", current_process_id());
5261       }
5262     }
5263   }
5264 
5265   return strlen(buffer);
5266 }
5267 
5268 bool os::start_debugging(char *buf, int buflen) {
5269   int len = (int)strlen(buf);
5270   char *p = &buf[len];
5271 
5272   jio_snprintf(p, buflen-len,
5273                "\n\n"
5274                "Do you want to debug the problem?\n\n"
5275                "To debug, run 'gdb /proc/%d/exe %d'; then switch to thread " UINTX_FORMAT " (" INTPTR_FORMAT ")\n"
5276                "Enter 'yes' to launch gdb automatically (PATH must include gdb)\n"
5277                "Otherwise, press RETURN to abort...",
5278                os::current_process_id(), os::current_process_id(),
5279                os::current_thread_id(), os::current_thread_id());
5280 
5281   bool yes = os::message_box("Unexpected Error", buf);
5282 
5283   if (yes) {
5284     // yes, user asked VM to launch debugger
5285     jio_snprintf(buf, sizeof(char)*buflen, "gdb /proc/%d/exe %d",
5286                  os::current_process_id(), os::current_process_id());
5287 
5288     os::fork_and_exec(buf);
5289     yes = false;
5290   }
5291   return yes;
5292 }
5293 
5294 
5295 // Java/Compiler thread:
5296 //
5297 //   Low memory addresses
5298 // P0 +------------------------+
5299 //    |                        |\  Java thread created by VM does not have glibc
5300 //    |    glibc guard page    | - guard page, attached Java thread usually has
5301 //    |                        |/  1 glibc guard page.
5302 // P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
5303 //    |                        |\
5304 //    |  HotSpot Guard Pages   | - red, yellow and reserved pages
5305 //    |                        |/
5306 //    +------------------------+ StackOverflow::stack_reserved_zone_base()
5307 //    |                        |\
5308 //    |      Normal Stack      | -
5309 //    |                        |/
5310 // P2 +------------------------+ Thread::stack_base()
5311 //
5312 // Non-Java thread:
5313 //
5314 //   Low memory addresses
5315 // P0 +------------------------+
5316 //    |                        |\
5317 //    |  glibc guard page      | - usually 1 page
5318 //    |                        |/
5319 // P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
5320 //    |                        |\
5321 //    |      Normal Stack      | -
5322 //    |                        |/
5323 // P2 +------------------------+ Thread::stack_base()
5324 //
5325 // ** P1 (aka bottom) and size (P2 = P1 - size) are the address and stack size
5326 //    returned from pthread_attr_getstack().
5327 // ** Due to NPTL implementation error, Linux takes the glibc guard page out
5328 //    of the stack size given in pthread_attr. We work around this for
5329 //    threads created by the VM. (We adapt bottom to be P1 and size accordingly.)
5330 //
5331 #ifndef ZERO
5332 static void current_stack_region(address * bottom, size_t * size) {
5333   if (os::is_primordial_thread()) {
5334     // primordial thread needs special handling because pthread_getattr_np()
5335     // may return a bogus value.
5336     *bottom = os::Linux::initial_thread_stack_bottom();
5337     *size   = os::Linux::initial_thread_stack_size();
5338   } else {
5339     pthread_attr_t attr;
5340 
5341     int rslt = pthread_getattr_np(pthread_self(), &attr);
5342 
5343     // JVM needs to know exact stack location, abort if it fails
5344     if (rslt != 0) {
5345       if (rslt == ENOMEM) {
5346         vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
5347       } else {
5348         fatal("pthread_getattr_np failed with error = %d", rslt);
5349       }
5350     }
5351 
5352     if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) {
5353       fatal("Cannot locate current stack attributes!");
5354     }
5355 
5356     // Work around NPTL stack guard error.
5357     size_t guard_size = 0;
5358     rslt = pthread_attr_getguardsize(&attr, &guard_size);
5359     if (rslt != 0) {
5360       fatal("pthread_attr_getguardsize failed with error = %d", rslt);
5361     }
5362     *bottom += guard_size;
5363     *size   -= guard_size;
5364 
5365     pthread_attr_destroy(&attr);
5366 
5367   }
5368   assert(os::current_stack_pointer() >= *bottom &&
5369          os::current_stack_pointer() < *bottom + *size, "just checking");
5370 }
5371 
5372 address os::current_stack_base() {
5373   address bottom;
5374   size_t size;
5375   current_stack_region(&bottom, &size);
5376   return (bottom + size);
5377 }
5378 
5379 size_t os::current_stack_size() {
5380   // This stack size includes the usable stack and HotSpot guard pages
5381   // (for the threads that have Hotspot guard pages).
5382   address bottom;
5383   size_t size;
5384   current_stack_region(&bottom, &size);
5385   return size;
5386 }
5387 #endif
5388 
5389 static inline struct timespec get_mtime(const char* filename) {
5390   struct stat st;
5391   int ret = os::stat(filename, &st);
5392   assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
5393   return st.st_mtim;
5394 }
5395 
5396 int os::compare_file_modified_times(const char* file1, const char* file2) {
5397   struct timespec filetime1 = get_mtime(file1);
5398   struct timespec filetime2 = get_mtime(file2);
5399   int diff = filetime1.tv_sec - filetime2.tv_sec;
5400   if (diff == 0) {
5401     return filetime1.tv_nsec - filetime2.tv_nsec;
5402   }
5403   return diff;
5404 }
5405 
5406 bool os::supports_map_sync() {
5407   return true;
5408 }
5409 
5410 void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {
5411   // Note: all ranges are "[..)"
5412   unsigned long long start = (unsigned long long)addr;
5413   unsigned long long end = start + bytes;
5414   FILE* f = os::fopen("/proc/self/maps", "r");
5415   int num_found = 0;
5416   if (f != NULL) {
5417     st->print_cr("Range [%llx-%llx) contains: ", start, end);
5418     char line[512];
5419     while (fgets(line, sizeof(line), f) == line) {
5420       unsigned long long segment_start = 0;
5421       unsigned long long segment_end = 0;
5422       if (::sscanf(line, "%llx-%llx", &segment_start, &segment_end) == 2) {
5423         // Let's print out every range which touches ours.
5424         if (segment_start < end && segment_end > start) {
5425           num_found++;
5426           st->print("%s", line); // line includes \n
5427         }
5428       }
5429     }
5430     ::fclose(f);
5431     if (num_found == 0) {
5432       st->print_cr("nothing.");
5433     }
5434     st->cr();
5435   }
5436 }