/*
 * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2022 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "jvm.h"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvmtifiles/jvmti.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_linux.inline.hpp"
#include "os_posix.inline.hpp"
#include "os_share_linux.hpp"
#include "osContainer_linux.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "signals_posix.hpp"
#include "semaphore_posix.hpp"
#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
#include "utilities/align.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/elfFile.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here
# include <sys/types.h>
# include <sys/mman.h>
# include <sys/stat.h>
# include <sys/select.h>
# include <pthread.h>
# include <signal.h>
# include <endian.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>
# include <pthread.h>
# include <sys/stat.h>
# include <sys/time.h>
# include <sys/times.h>
# include <sys/utsname.h>
# include <sys/socket.h>
# include <pwd.h>
# include <poll.h>
# include <fcntl.h>
# include <string.h>
# include <syscall.h>
# include <sys/sysinfo.h>
# include <sys/ipc.h>
# include <sys/shm.h>
# include <link.h>
# include <stdint.h>
# include <inttypes.h>
# include <sys/ioctl.h>
# include <linux/elf-em.h>
#ifdef __GLIBC__
# include <malloc.h>
#endif

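// <sched.h> is included with _GNU_SOURCE defined so that the GNU extensions
// used elsewhere in this file (e.g. sched_getaffinity() and the CPU_* macros
// for cpu_set_t) are declared; the #undef below keeps the macro from leaking
// into the rest of the translation unit if it was not already defined.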
#ifndef _GNU_SOURCE
  #define _GNU_SOURCE
  #include <sched.h>
  #undef _GNU_SOURCE
#else
  #include <sched.h>
#endif

// if RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
// getrusage() is prepared to handle the associated failure.
#ifndef RUSAGE_THREAD
  #define RUSAGE_THREAD   (1)               /* only the calling thread */
#endif

#define MAX_PATH    (2 * K)

#define MAX_SECS 100000000

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)

#ifdef MUSL_LIBC
// dlvsym is not a part of POSIX
// and musl libc doesn't implement it.
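// Note: falling back to dlsym() returns the default symbol version rather
// than the specifically requested one; for the lookups done in this file the
// default (latest) version is assumed to be sufficient.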
static void *dlvsym(void *handle,
                    const char *symbol,
                    const char *version) {
   // load the latest version of symbol
   return dlsym(handle, symbol);
}
#endif

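// These are bit positions in /proc/<pid>/coredump_filter, as documented in
// core(5): bit 2 = file-backed private mappings, bit 3 = file-backed shared
// mappings, bit 6 = hugetlb shared mappings, bit 8 = shared DAX mappings.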
enum CoredumpFilterBit {
  FILE_BACKED_PVT_BIT = 1 << 2,
  FILE_BACKED_SHARED_BIT = 1 << 3,
  LARGEPAGES_BIT = 1 << 6,
  DAX_SHARED_BIT = 1 << 8
};

////////////////////////////////////////////////////////////////////////////////
// global variables
julong os::Linux::_physical_memory = 0;

address   os::Linux::_initial_thread_stack_bottom = NULL;
uintptr_t os::Linux::_initial_thread_stack_size   = 0;

int (*os::Linux::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
int (*os::Linux::_pthread_setname_np)(pthread_t, const char*) = NULL;
pthread_t os::Linux::_main_thread;
int os::Linux::_page_size = -1;
bool os::Linux::_supports_fast_thread_cpu_time = false;
const char * os::Linux::_libc_version = NULL;
const char * os::Linux::_libpthread_version = NULL;
size_t os::Linux::_default_large_page_size = 0;

#ifdef __GLIBC__
os::Linux::mallinfo_func_t os::Linux::_mallinfo = NULL;
os::Linux::mallinfo2_func_t os::Linux::_mallinfo2 = NULL;
#endif // __GLIBC__

static jlong initial_time_count = 0;

static int clock_tics_per_sec = 100;

// If the VM might have been created on the primordial thread, we need to resolve the
// primordial thread stack bounds and check if the current thread might be the
// primordial thread in places. If we know that the primordial thread is never used,
// such as when the VM was created by one of the standard java launchers, we can
// avoid this.
static bool suppress_primordial_thread_resolution = false;

// utility functions

julong os::available_memory() {
  return Linux::available_memory();
}

julong os::Linux::available_memory() {
  // values in struct sysinfo are "unsigned long"
  struct sysinfo si;
  julong avail_mem;

  if (OSContainer::is_containerized()) {
    jlong mem_limit = OSContainer::memory_limit_in_bytes();
    jlong mem_usage;
    if (mem_limit > 0 && (mem_usage = OSContainer::memory_usage_in_bytes()) < 1) {
      log_debug(os, container)("container memory usage failed: " JLONG_FORMAT ", using host value", mem_usage);
    }
    if (mem_limit > 0 && mem_usage > 0) {
      avail_mem = mem_limit > mem_usage ? (julong)mem_limit - (julong)mem_usage : 0;
      log_trace(os)("available container memory: " JULONG_FORMAT, avail_mem);
      return avail_mem;
    }
  }

  sysinfo(&si);
  avail_mem = (julong)si.freeram * si.mem_unit;
  log_trace(os)("available memory: " JULONG_FORMAT, avail_mem);
  return avail_mem;
}

julong os::physical_memory() {
  jlong phys_mem = 0;
  if (OSContainer::is_containerized()) {
    jlong mem_limit;
    if ((mem_limit = OSContainer::memory_limit_in_bytes()) > 0) {
      log_trace(os)("total container memory: " JLONG_FORMAT, mem_limit);
      return mem_limit;
    }
  }

  phys_mem = Linux::physical_memory();
  log_trace(os)("total system memory: " JLONG_FORMAT, phys_mem);
  return phys_mem;
}

static uint64_t initial_total_ticks = 0;
static uint64_t initial_steal_ticks = 0;
static bool     has_initial_tick_info = false;

static void next_line(FILE *f) {
  int c;
  do {
    c = fgetc(f);
  } while (c != '\n' && c != EOF);
}

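// For reference, the /proc/stat lines parsed below have this shape (tick
// values are cumulative; the numbers here are made up for illustration):
//   cpu  74608 2520 24433 1117073 6176 4054 0 0 0 0
//   cpu0 17977 483 6082 279174 1434 1006 0 0 0 0
// where the fields after the tag are:
//   user nice system idle iowait irq softirq steal guest guest_nice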
bool os::Linux::get_tick_information(CPUPerfTicks* pticks, int which_logical_cpu) {
  FILE*         fh;
  uint64_t      userTicks, niceTicks, systemTicks, idleTicks;
  // since at least kernel 2.6 : iowait: time waiting for I/O to complete
  // irq: time servicing interrupts; softirq: time servicing softirqs
  uint64_t      iowTicks = 0, irqTicks = 0, sirqTicks = 0;
  // steal (since kernel 2.6.11): time spent in other OS when running in a virtualized environment
  uint64_t      stealTicks = 0;
  // guest (since kernel 2.6.24): time spent running a virtual CPU for guest OS under the
  // control of the Linux kernel
  uint64_t      guestNiceTicks = 0;
  int           logical_cpu = -1;
  const int     required_tickinfo_count = (which_logical_cpu == -1) ? 4 : 5;
  int           n;

  memset(pticks, 0, sizeof(CPUPerfTicks));

  if ((fh = fopen("/proc/stat", "r")) == NULL) {
    return false;
  }

  if (which_logical_cpu == -1) {
    n = fscanf(fh, "cpu " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
            UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
            UINT64_FORMAT " " UINT64_FORMAT " ",
            &userTicks, &niceTicks, &systemTicks, &idleTicks,
            &iowTicks, &irqTicks, &sirqTicks,
            &stealTicks, &guestNiceTicks);
  } else {
    // Move to next line
    next_line(fh);

    // find the line for the requested cpu; faster to just iterate linefeeds?
    for (int i = 0; i < which_logical_cpu; i++) {
      next_line(fh);
    }

    n = fscanf(fh, "cpu%u " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
               UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
               UINT64_FORMAT " " UINT64_FORMAT " ",
               &logical_cpu, &userTicks, &niceTicks,
               &systemTicks, &idleTicks, &iowTicks, &irqTicks, &sirqTicks,
               &stealTicks, &guestNiceTicks);
  }

  fclose(fh);
  if (n < required_tickinfo_count || logical_cpu != which_logical_cpu) {
    return false;
  }
  pticks->used       = userTicks + niceTicks;
  pticks->usedKernel = systemTicks + irqTicks + sirqTicks;
  pticks->total      = userTicks + niceTicks + systemTicks + idleTicks +
                       iowTicks + irqTicks + sirqTicks + stealTicks + guestNiceTicks;

  if (n > required_tickinfo_count + 3) {
    pticks->steal = stealTicks;
    pticks->has_steal_ticks = true;
  } else {
    pticks->steal = 0;
    pticks->has_steal_ticks = false;
  }

  return true;
}

// Return true if the process is running with elevated privileges, i.e. its
// real and effective user or group IDs differ (setuid/setgid executable).

bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}


#ifndef SYS_gettid
// i386: 224, ia64: 1105, amd64: 186, sparc: 143
  #ifdef __ia64__
    #define SYS_gettid 1105
  #else
    #ifdef __i386__
      #define SYS_gettid 224
    #else
      #ifdef __amd64__
        #define SYS_gettid 186
      #else
        #ifdef __sparc__
          #define SYS_gettid 143
        #else
          #error define gettid for the arch
        #endif
      #endif
    #endif
  #endif
#endif


// pid_t gettid()
//
// Returns the kernel thread id of the currently running thread. Kernel
// thread id is used to access /proc.
pid_t os::Linux::gettid() {
  int rslt = syscall(SYS_gettid);
  assert(rslt != -1, "must be."); // old linuxthreads implementation?
  return (pid_t)rslt;
}

// Returns the amount of swap currently configured, in bytes.
// This can change at any time.
julong os::Linux::host_swap() {
  struct sysinfo si;
  sysinfo(&si);
  // totalswap is reported in units of mem_unit bytes, as with the other
  // struct sysinfo fields used above.
  return (julong)si.totalswap * si.mem_unit;
}

// Most versions of Linux determine the number of processors by consulting the
// /proc file system. In a chroot environment where /proc is not mounted, the
// call returns 1.
static bool unsafe_chroot_detected = false;
static const char *unstable_chroot_error = "/proc file system not found.\n"
                     "Java may be unstable running multithreaded in a chroot "
                     "environment on Linux when /proc filesystem is not mounted.";

void os::Linux::initialize_system_info() {
  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
  if (processor_count() == 1) {
    pid_t pid = os::Linux::gettid();
    char fname[32];
    jio_snprintf(fname, sizeof(fname), "/proc/%d", pid);
    FILE *fp = fopen(fname, "r");
    if (fp == NULL) {
      unsafe_chroot_detected = true;
    } else {
      fclose(fp);
    }
  }
  _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
  assert(processor_count() > 0, "linux error");
}

void os::init_system_properties_values() {
  // The next steps are taken in the product version:
  //
  // Obtain the JAVA_HOME value from the location of libjvm.so.
  // This library should be located at:
  // <JAVA_HOME>/lib/{client|server}/libjvm.so.
  //
  // If "/jre/lib/" appears at the right place in the path, then we
  // assume libjvm.so is installed in a JDK and we use this path.
  //
  // Otherwise exit with message: "Could not create the Java virtual machine."
  //
  // The following extra steps are taken in the debugging version:
  //
  // If "/jre/lib/" does NOT appear at the right place in the path,
  // then instead of exiting, check for the $JAVA_HOME environment variable.
  //
  // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
  // then we append a fake suffix "hotspot/libjvm.so" to this path so
  // it looks like libjvm.so is installed there
  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
  //
  // Otherwise exit.
  //
  // Important note: if the location of libjvm.so changes this
  // code needs to be changed accordingly.

  // See ld(1):
  //      The linker uses the following search paths to locate required
  //      shared libraries:
  //        1: ...
  //        ...
  //        7: The default directories, normally /lib and /usr/lib.
#ifndef OVERRIDE_LIBPATH
  #if defined(_LP64)
    #define DEFAULT_LIBPATH "/usr/lib64:/lib64:/lib:/usr/lib"
  #else
    #define DEFAULT_LIBPATH "/lib:/usr/lib"
  #endif
#else
  #define DEFAULT_LIBPATH OVERRIDE_LIBPATH
#endif

// Base path of extensions installed on the system.
#define SYS_EXT_DIR     "/usr/java/packages"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the colon and the trailing null are provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /libjvm.so.
    }
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';        // Get rid of /lib.
      }
    }
    Arguments::set_java_home(buf);
    if (!set_boot_path('/', ':')) {
      vm_exit_during_initialization("Failed setting boot class path.", NULL);
    }
  }

  // Where to look for native libraries.
  //
  // Note: Due to a legacy implementation, most of the library path
  // is set in the launcher. This was to accommodate linking restrictions
  // on legacy Linux implementations (which are no longer supported).
  // Eventually, all the library path setting will be done here.
  //
  // However, to prevent the proliferation of improperly built native
  // libraries, the new path component /usr/java/packages is added here.
  {
    // Get the user setting of LD_LIBRARY_PATH, and prepend it. It
    // should always exist (until the legacy problem cited above is
    // addressed).
    const char *v = ::getenv("LD_LIBRARY_PATH");
    const char *v_colon = ":";
    if (v == NULL) { v = ""; v_colon = ""; }
    // That's +1 for the colon and +1 for the trailing '\0'.
    char *ld_library_path = NEW_C_HEAP_ARRAY(char,
                                             strlen(v) + 1 +
                                             sizeof(SYS_EXT_DIR) + sizeof("/lib/") + sizeof(DEFAULT_LIBPATH) + 1,
                                             mtInternal);
    sprintf(ld_library_path, "%s%s" SYS_EXT_DIR "/lib:" DEFAULT_LIBPATH, v, v_colon);
    Arguments::set_library_path(ld_library_path);
    FREE_C_HEAP_ARRAY(char, ld_library_path);
  }

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef SYS_EXT_DIR
#undef EXTENSIONS_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

//////////////////////////////////////////////////////////////////////////////
// detecting pthread library

void os::Linux::libpthread_init() {
  // Save glibc and pthread version strings.
#if !defined(_CS_GNU_LIBC_VERSION) || \
    !defined(_CS_GNU_LIBPTHREAD_VERSION)
  #error "glibc too old (< 2.3.2)"
#endif

#ifdef MUSL_LIBC
  // confstr() from musl libc returns EINVAL for
  // _CS_GNU_LIBC_VERSION and _CS_GNU_LIBPTHREAD_VERSION
  os::Linux::set_libc_version("musl - unknown");
  os::Linux::set_libpthread_version("musl - unknown");
#else
  size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0);
  assert(n > 0, "cannot retrieve glibc version");
  char *str = (char *)malloc(n, mtInternal);
  confstr(_CS_GNU_LIBC_VERSION, str, n);
  os::Linux::set_libc_version(str);

  n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
  assert(n > 0, "cannot retrieve pthread version");
  str = (char *)malloc(n, mtInternal);
  confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
  os::Linux::set_libpthread_version(str);
#endif
}

/////////////////////////////////////////////////////////////////////////////
// thread stack expansion

// os::Linux::manually_expand_stack() takes care of expanding the thread
// stack. Note that this is normally not needed: pthread allocates thread
// stacks using mmap() without MAP_NORESERVE, so the stack is already
// committed. Therefore it is not necessary to expand the stack manually.
//
// Manually expanding the stack was historically needed on LinuxThreads
// thread stacks, which were allocated with mmap(MAP_GROWSDOWN). Nowadays
// it is kept to deal with very rare corner cases:
//
// For one, the user may run the VM on their own implementation of threads
// whose stacks are - like the old LinuxThreads - implemented using
// mmap(MAP_GROWSDOWN).
//
// Also, this coding may be needed if the VM is running on the primordial
// thread. Normally we avoid running on the primordial thread; however,
// the user may still invoke the VM on the primordial thread.
//
// The following historical comment describes the details about running
// on a thread stack allocated with mmap(MAP_GROWSDOWN):


// Force the Linux kernel to expand the current thread stack. If "bottom" is
// close to the stack guard, the caller should block all signals.
//
// MAP_GROWSDOWN:
//   A special mmap() flag that is used to implement thread stacks. It tells
//   the kernel that the memory region should extend downwards when needed.
//   This allows early versions of LinuxThreads to only mmap the first few
//   pages when creating a new thread. The Linux kernel will automatically
//   expand the thread stack as needed (on page faults).
//
//   However, because the memory region of a MAP_GROWSDOWN stack can grow on
//   demand, if a page fault happens outside an already mapped MAP_GROWSDOWN
//   region, it's hard to tell if the fault is due to a legitimate stack
//   access or because of reading/writing non-existent memory (e.g. a buffer
//   overrun). As a rule, if the fault happens below the current stack pointer,
//   the Linux kernel does not expand the stack; instead a SIGSEGV is sent to
//   the application (see Linux kernel fault.c).
//
//   This Linux feature can cause SIGSEGV when the VM bangs the thread stack
//   for stack overflow detection.
//
//   Newer versions of LinuxThreads (since glibc-2.2, or, RH-7.x) and NPTL do
//   not use MAP_GROWSDOWN.
//
// To get around the problem and allow stack banging on Linux, we need to
// manually expand the thread stack after receiving the SIGSEGV.
//
// There are two ways to expand the thread stack to address "bottom"; we used
// both of them in the JVM before 1.5:
//   1. adjust the stack pointer first so that it is below "bottom", and then
//      touch "bottom"
//   2. mmap() the page in question
//
// Now that the alternate signal stack is gone, it's harder to use 2. For
// instance, if the current sp is already near the lower end of page 101, and
// we need to call mmap() to map page 100, it is possible that part of the
// mmap() frame will be placed in page 100. When page 100 is mapped, it is
// zero-filled. That will destroy the mmap() frame and cause the VM to crash.
//
// The following code works by adjusting sp first, then accessing the "bottom"
// page to force a page fault. The Linux kernel will then automatically expand
// the stack mapping.
//
// _expand_stack_to() assumes its frame size is less than page size, which
// should always be true if the function is not inlined.
static void NOINLINE _expand_stack_to(address bottom) {
  address sp;
  size_t size;
  volatile char *p;

  // Adjust bottom to point to the largest address within the same page; this
  // gives us a one-page buffer if alloca() allocates slightly more memory.
  bottom = (address)align_down((uintptr_t)bottom, os::Linux::page_size());
  bottom += os::Linux::page_size() - 1;

  // sp might be slightly above the current stack pointer; if that's the case,
  // we will alloca() a little more space than necessary, which is OK. Don't
  // use os::current_stack_pointer(), as its result can be slightly below the
  // current stack pointer, causing us to not alloca enough to reach "bottom".
  sp = (address)&sp;

  if (sp > bottom) {
    size = sp - bottom;
    p = (volatile char *)alloca(size);
    assert(p != NULL && p <= (volatile char *)bottom, "alloca problem?");
    p[0] = '\0';
  }
}

void os::Linux::expand_stack_to(address bottom) {
  _expand_stack_to(bottom);
}

bool os::Linux::manually_expand_stack(JavaThread * t, address addr) {
  assert(t != NULL, "just checking");
  assert(t->osthread()->expanding_stack(), "expand should be set");

  if (t->is_in_usable_stack(addr)) {
    sigset_t mask_all, old_sigset;
    sigfillset(&mask_all);
    pthread_sigmask(SIG_SETMASK, &mask_all, &old_sigset);
    _expand_stack_to(addr);
    pthread_sigmask(SIG_SETMASK, &old_sigset, NULL);
    return true;
  }
  return false;
}

//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads
static void *thread_native_entry(Thread *thread) {

  thread->record_stack_base_and_size();

#ifndef __GLIBC__
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially significant for
  // processors with hyperthreading technology.
  // This code is not needed anymore in glibc because it has MULTI_PAGE_ALIASING
  // and we did not see any degradation in performance without `alloca()`.
  static int counter = 0;
  int pid = os::current_process_id();
  int random = ((pid ^ counter++) & 7) * 128;
  void *stackmem = alloca(random != 0 ? random : 1); // ensure we allocate > 0
  // Ensure the alloca result is used in a way that prevents the compiler from eliding it.
  *(char *)stackmem = 1;
#endif

  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();
  Monitor* sync = osthread->startThread_lock();

  osthread->set_thread_id(os::current_thread_id());

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }
  // initialize signal mask for this thread
  PosixSignals::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Linux::init_thread_fpu_state();

  // handshaking with parent thread
  {
    MutexLocker ml(sync, Mutex::_no_safepoint_check_flag);

    // notify parent thread
    osthread->set_state(INITIALIZED);
    sync->notify_all();

    // wait until os::start_thread()
    while (osthread->get_state() == INITIALIZED) {
      sync->wait_without_safepoint_check();
    }
  }

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) pthread_self());

  assert(osthread->pthread_id() != 0, "pthread_id was not set as expected");

  // call one more level start routine
  thread->call_run();

  // Note: at this point the thread object may already have deleted itself.
  // Prevent dereferencing it from here on out.
  thread = NULL;

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) pthread_self());

  return 0;
}

// On Linux, glibc places static TLS blocks (for __thread variables) on
// the thread stack. This decreases the stack size actually available
// to threads.
//
// For large static TLS sizes, this may cause threads to malfunction due
// to insufficient stack space. This is a well-known issue in glibc:
// http://sourceware.org/bugzilla/show_bug.cgi?id=11787.
//
// As a workaround, we call a private but assumed-stable glibc function,
// __pthread_get_minstack() to obtain the minstack size and derive the
// static TLS size from it. We then increase the user requested stack
// size by this TLS size.
//
// Due to compatibility concerns, this size adjustment is opt-in and
// controlled via AdjustStackSizeForTLS.
typedef size_t (*GetMinStack)(const pthread_attr_t *attr);

GetMinStack _get_minstack_func = NULL;

static void get_minstack_init() {
  _get_minstack_func =
        (GetMinStack)dlsym(RTLD_DEFAULT, "__pthread_get_minstack");
  log_info(os, thread)("Lookup of __pthread_get_minstack %s",
                       _get_minstack_func == NULL ? "failed" : "succeeded");
}

// Returns the size of the static TLS area glibc puts on thread stacks.
// The value is cached on first use, which occurs when the first thread
// is created during VM initialization.
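// Worked example with made-up numbers: given a 4K page, a PTHREAD_STACK_MIN
// of 16K and 24K of static TLS, the glibc formula quoted below would make
// __pthread_get_minstack() return 4K + 24K + 16K = 44K, from which the code
// recovers the static TLS size as 44K - 4K - 16K = 24K.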
static size_t get_static_tls_area_size(const pthread_attr_t *attr) {
  size_t tls_size = 0;
  if (_get_minstack_func != NULL) {
    // Obtain the pthread minstack size by calling __pthread_get_minstack.
    size_t minstack_size = _get_minstack_func(attr);

    // Remove the non-TLS area size included in the minstack size returned
    // by __pthread_get_minstack() to get the static TLS size.
    // In glibc before 2.27, minstack size includes guard_size.
    // In glibc 2.27 and later, guard_size is automatically added
    // to the stack size by pthread_create and is no longer included
    // in minstack size. In both cases, the guard_size is taken into
    // account, so there is no need to adjust the result for that.
    //
    // Although __pthread_get_minstack() is a private glibc function,
    // it is expected to have a stable behavior across future glibc
    // versions while glibc still allocates the static TLS blocks off
    // the stack. The following is glibc 2.28 __pthread_get_minstack():
    //
    // size_t
    // __pthread_get_minstack (const pthread_attr_t *attr)
    // {
    //   return GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN;
    // }
    //
    // The following 'minstack_size > os::vm_page_size() + PTHREAD_STACK_MIN'
    // check is done as a precaution.
    if (minstack_size > (size_t)os::vm_page_size() + PTHREAD_STACK_MIN) {
      tls_size = minstack_size - os::vm_page_size() - PTHREAD_STACK_MIN;
    }
  }

  log_info(os, thread)("Stack size adjustment for TLS is " SIZE_FORMAT,
                       tls_size);
  return tls_size;
}

bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t req_stack_size) {
  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  // Calculate stack size if it's not specified by caller.
  size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
  // In glibc versions prior to 2.7 the guard size mechanism
  // is not implemented properly. The POSIX standard requires adding
  // the size of the guard pages to the stack size; instead Linux
  // takes the space out of 'stacksize'. Thus we adapt the requested
  // stack_size by the size of the guard pages to mimic proper
  // behaviour. However, be careful not to end up with a size
  // of zero due to overflow. Don't add the guard page in that case.
  size_t guard_size = os::Linux::default_guard_size(thr_type);
  // Configure glibc guard page. Must happen before calling
  // get_static_tls_area_size(), which uses the guard_size.
  pthread_attr_setguardsize(&attr, guard_size);

  size_t stack_adjust_size = 0;
  if (AdjustStackSizeForTLS) {
    // Adjust the stack_size for on-stack TLS - see get_static_tls_area_size().
    stack_adjust_size += get_static_tls_area_size(&attr);
  } else {
    stack_adjust_size += guard_size;
  }

  stack_adjust_size = align_up(stack_adjust_size, os::vm_page_size());
  if (stack_size <= SIZE_MAX - stack_adjust_size) {
    stack_size += stack_adjust_size;
  }
  assert(is_aligned(stack_size, os::vm_page_size()), "stack_size not aligned");

  int status = pthread_attr_setstacksize(&attr, stack_size);
  if (status != 0) {
    // The pthread_attr_setstacksize() function can fail
    // if the stack size exceeds a system-imposed limit.
    assert_status(status == EINVAL, status, "pthread_attr_setstacksize");
    log_warning(os, thread)("The %sthread stack size specified is invalid: " SIZE_FORMAT "k",
                            (thr_type == compiler_thread) ? "compiler " : ((thr_type == java_thread) ? "" : "VM "),
                            stack_size / K);
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  ThreadState state;

  {
    ResourceMark rm;
    pthread_t tid;
    int ret = 0;
    int limit = 3;
    do {
      ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);
    } while (ret == EAGAIN && limit-- > 0);

    char buf[64];
    if (ret == 0) {
      log_info(os, thread)("Thread \"%s\" started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
                           thread->name(), (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
    } else {
      log_warning(os, thread)("Failed to start thread \"%s\" - pthread_create failed (%s) for attributes: %s.",
                              thread->name(), os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
      // Log some OS information which might explain why creating the thread failed.
      log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
      LogStream st(Log(os, thread)::info());
      os::Posix::print_rlimit_info(&st);
      os::print_memory_info(&st);
      os::Linux::print_proc_sys_info(&st);
      os::Linux::print_container_info(&st);
    }

    pthread_attr_destroy(&attr);

    if (ret != 0) {
      // Need to clean up stuff we've allocated so far
      thread->set_osthread(NULL);
      delete osthread;
      return false;
    }

    // Store pthread info into the OSThread
    osthread->set_pthread_id(tid);

    // Wait until child thread is either initialized or aborted
    {
      Monitor* sync_with_child = osthread->startThread_lock();
      MutexLocker ml(sync_with_child, Mutex::_no_safepoint_check_flag);
      while ((state = osthread->get_state()) == ALLOCATED) {
        sync_with_child->wait_without_safepoint_check();
      }
    }
  }

  // The thread is returned suspended (in state INITIALIZED),
  // and is started higher up in the call chain
  assert(state == INITIALIZED, "race condition");
  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Linux::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Linux::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Linux::init_thread_fpu_state();

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  if (os::is_primordial_thread()) {
    // If current thread is primordial thread, its stack is mapped on demand,
    // see notes about MAP_GROWSDOWN. Here we try to force kernel to map
    // the entire stack region to avoid SEGV in stack banging.
    // It is also useful to get around the heap-stack-gap problem on SuSE
    // kernel (see 4821821 for details). We first expand stack to the top
    // of yellow zone, then enable stack yellow zone (order is significant,
    // enabling yellow zone first will crash JVM on SuSE Linux), so there
    // is no gap between the last two virtual memory regions.

    StackOverflow* overflow_state = thread->stack_overflow_state();
    address addr = overflow_state->stack_reserved_zone_base();
    assert(addr != NULL, "initialization problem?");
    assert(overflow_state->stack_available(addr) > 0, "stack guard should not be enabled");

    osthread->set_expanding_stack();
    os::Linux::manually_expand_stack(thread, addr);
    osthread->clear_expanding_stack();
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  PosixSignals::hotspot_sigmask(thread);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT
                       ", stack: " PTR_FORMAT " - " PTR_FORMAT " (" SIZE_FORMAT "K) ).",
                       os::current_thread_id(), (uintx) pthread_self(),
                       p2i(thread->stack_base()), p2i(thread->stack_end()), thread->stack_size());

  return true;
}

void os::pd_start_thread(Thread* thread) {
  OSThread * osthread = thread->osthread();
  assert(osthread->get_state() != INITIALIZED, "just checking");
  Monitor* sync_with_child = osthread->startThread_lock();
  MutexLocker ml(sync_with_child, Mutex::_no_safepoint_check_flag);
  sync_with_child->notify();
}

// Free Linux resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  assert(Thread::current()->osthread() == osthread,
         "os::free_thread but not current thread");

#ifdef ASSERT
  sigset_t current;
  sigemptyset(&current);
  pthread_sigmask(SIG_SETMASK, NULL, &current);
  assert(!sigismember(&current, PosixSignals::SR_signum), "SR signal should not be blocked!");
#endif

  // Restore caller's signal mask
  sigset_t sigmask = osthread->caller_sigmask();
  pthread_sigmask(SIG_SETMASK, &sigmask, NULL);

  delete osthread;
}

//////////////////////////////////////////////////////////////////////////////
// primordial thread

// Check if current thread is the primordial thread, similar to Solaris thr_main.
bool os::is_primordial_thread(void) {
  if (suppress_primordial_thread_resolution) {
    return false;
  }
  char dummy;
  // If called before init complete, thread stack bottom will be null.
  // Can be called if fatal error occurs before initialization.
  if (os::Linux::initial_thread_stack_bottom() == NULL) return false;
  assert(os::Linux::initial_thread_stack_bottom() != NULL &&
         os::Linux::initial_thread_stack_size()   != 0,
         "os::init did not locate primordial thread's stack region");
  if ((address)&dummy >= os::Linux::initial_thread_stack_bottom() &&
      (address)&dummy < os::Linux::initial_thread_stack_bottom() +
                        os::Linux::initial_thread_stack_size()) {
    return true;
  } else {
    return false;
  }
}

// Find the virtual memory area that contains addr
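// Each line of /proc/self/maps starts with the address range of a mapping,
// e.g. (an illustrative line):
//   7ffc7c4ae000-7ffc7c4cf000 rw-p 00000000 00:00 0    [stack]
// so scanning "%p-%p" below yields the low and high bounds of each vma.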
static bool find_vma(address addr, address* vma_low, address* vma_high) {
  FILE *fp = fopen("/proc/self/maps", "r");
  if (fp) {
    address low, high;
    while (!feof(fp)) {
      if (fscanf(fp, "%p-%p", &low, &high) == 2) {
        if (low <= addr && addr < high) {
          if (vma_low)  *vma_low  = low;
          if (vma_high) *vma_high = high;
          fclose(fp);
          return true;
        }
      }
      for (;;) {
        int ch = fgetc(fp);
        if (ch == EOF || ch == (int)'\n') break;
      }
    }
    fclose(fp);
  }
  return false;
}

// Locate the primordial thread stack. This special handling is needed because
// pthread_getattr_np() on most (all?) Linux distros returns a bogus value for
// the primordial process thread. While the launcher has created the VM in a
// new thread since JDK 6, we still have to allow for the use of the JNI
// invocation API from a primordial thread.
void os::Linux::capture_initial_stack(size_t max_size) {

  // max_size is either 0 (which means accept OS default for thread stacks) or
  // a user-specified value known to be at least the minimum needed. If we
  // are actually on the primordial thread we can make it appear that we have a
  // smaller max_size stack by inserting the guard pages at that location. But we
  // cannot do anything to emulate a larger stack than what has been provided by
  // the OS or threading library. In fact if we try to use a stack greater than
  // what is set by rlimit then we will crash the hosting process.

  // Maximum stack size is the easy part, get it from RLIMIT_STACK.
  // If this is "unlimited" then it will be a huge value.
  struct rlimit rlim;
  getrlimit(RLIMIT_STACK, &rlim);
  size_t stack_size = rlim.rlim_cur;

  // 6308388: a bug in ld.so will relocate its own .data section to the
  //   lower end of primordial stack; reduce ulimit -s value a little bit
  //   so we won't install guard page on ld.so's data section.
  //   But ensure we don't underflow the stack size - allow 1 page spare
  if (stack_size >= (size_t)(3 * page_size())) {
    stack_size -= 2 * page_size();
  }

  // Try to figure out where the stack base (top) is. This is harder.
  //
  // When an application is started, glibc saves the initial stack pointer in
  // a global variable "__libc_stack_end", which is then used by system
  // libraries. __libc_stack_end should be pretty close to stack top. The
  // variable is available since the very early days. However, because it is
  // a private interface, it could disappear in the future.
  //
  // Linux kernel saves start_stack information in /proc/<pid>/stat. Similar
  // to __libc_stack_end, it is very close to stack top, but isn't the real
  // stack top. Note that /proc may not exist if VM is running as a chroot
  // program, so reading /proc/<pid>/stat could fail. Also the contents of
  // /proc/<pid>/stat could change in the future (though unlikely).
  //
  // We try __libc_stack_end first. If that doesn't work, look for
  // /proc/<pid>/stat. If neither of them works, we use current stack pointer
  // as a hint, which should work well in most cases.

  uintptr_t stack_start;

  // try __libc_stack_end first
  uintptr_t *p = (uintptr_t *)dlsym(RTLD_DEFAULT, "__libc_stack_end");
  if (p && *p) {
    stack_start = *p;
  } else {
    // see if we can get the start_stack field from /proc/self/stat
    FILE *fp;
    int pid;
    char state;
    int ppid;
    int pgrp;
    int session;
    int nr;
    int tpgrp;
    unsigned long flags;
    unsigned long minflt;
    unsigned long cminflt;
    unsigned long majflt;
    unsigned long cmajflt;
    unsigned long utime;
    unsigned long stime;
    long cutime;
    long cstime;
    long prio;
    long nice;
    long junk;
    long it_real;
    uintptr_t start;
    uintptr_t vsize;
    intptr_t rss;
    uintptr_t rsslim;
    uintptr_t scodes;
    uintptr_t ecode;
    int i;

    // Figure out what the primordial thread stack base is. Code is inspired
    // by email from Hans Boehm. /proc/self/stat begins with current pid,
    // followed by command name surrounded by parentheses, state, etc.
    char stat[2048];
    int statlen;

    fp = fopen("/proc/self/stat", "r");
    if (fp) {
      statlen = fread(stat, 1, 2047, fp);
      stat[statlen] = '\0';
      fclose(fp);

      // Skip pid and the command string. Note that we could be dealing with
      // weird command names, e.g. user could decide to rename java launcher
      // to "java 1.4.2 :)", then the stat file would look like
      //                1234 (java 1.4.2 :)) R ... ...
      // We don't really need to know the command string, just find the last
      // occurrence of ")" and then start parsing from there. See bug 4726580.
      char * s = strrchr(stat, ')');

      i = 0;
      if (s) {
        // Skip blank chars
        do { s++; } while (s && isspace(*s));

#define _UFM UINTX_FORMAT
#define _DFM INTX_FORMAT

        //                                     1   1   1   1   1   1   1   1   1   1   2   2    2    2    2    2    2    2    2
        //              3  4  5  6  7  8   9   0   1   2   3   4   5   6   7   8   9   0   1    2    3    4    5    6    7    8
        i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld " _UFM _UFM _DFM _UFM _UFM _UFM _UFM,
                   &state,          // 3  %c
                   &ppid,           // 4  %d
                   &pgrp,           // 5  %d
                   &session,        // 6  %d
                   &nr,             // 7  %d
                   &tpgrp,          // 8  %d
                   &flags,          // 9  %lu
                   &minflt,         // 10 %lu
                   &cminflt,        // 11 %lu
                   &majflt,         // 12 %lu
                   &cmajflt,        // 13 %lu
                   &utime,          // 14 %lu
                   &stime,          // 15 %lu
                   &cutime,         // 16 %ld
                   &cstime,         // 17 %ld
                   &prio,           // 18 %ld
                   &nice,           // 19 %ld
                   &junk,           // 20 %ld
                   &it_real,        // 21 %ld
                   &start,          // 22 UINTX_FORMAT
                   &vsize,          // 23 UINTX_FORMAT
                   &rss,            // 24 INTX_FORMAT
                   &rsslim,         // 25 UINTX_FORMAT
                   &scodes,         // 26 UINTX_FORMAT
                   &ecode,          // 27 UINTX_FORMAT
                   &stack_start);   // 28 UINTX_FORMAT
      }

#undef _UFM
#undef _DFM

      if (i != 28 - 2) {
        assert(false, "Bad conversion from /proc/self/stat");
        // product mode - assume we are the primordial thread, good luck in the
        // embedded case.
        warning("Can't detect primordial thread stack location - bad conversion");
        stack_start = (uintptr_t) &rlim;
      }
    } else {
      // For some reason we can't open /proc/self/stat (for example, running on
      // FreeBSD with a Linux emulator, or inside chroot), this should work for
      // most cases, so don't abort:
      warning("Can't detect primordial thread stack location - no /proc/self/stat");
      stack_start = (uintptr_t) &rlim;
    }
  }

  // Now we have a pointer (stack_start) very close to the stack top, the
  // next thing to do is to figure out the exact location of stack top. We
  // can find out the virtual memory area that contains stack_start by
  // reading /proc/self/maps, it should be the last vma in /proc/self/maps,
  // and its upper limit is the real stack top. (again, this would fail if
  // running inside chroot, because /proc may not exist.)

  uintptr_t stack_top;
  address low, high;
  if (find_vma((address)stack_start, &low, &high)) {
    // success, "high" is the true stack top. (ignore "low", because initial
    // thread stack grows on demand, its real bottom is high - RLIMIT_STACK.)
    stack_top = (uintptr_t)high;
  } else {
    // failed, likely because /proc/self/maps does not exist
    warning("Can't detect primordial thread stack location - find_vma failed");
    // best effort: stack_start is normally within a few pages below the real
    // stack top, use it as stack top, and reduce stack size so we won't put
    // guard page outside stack.
    stack_top = stack_start;
    stack_size -= 16 * page_size();
  }

  // stack_top could be partially down the page so align it
  stack_top = align_up(stack_top, page_size());

  // Allowed stack value is minimum of max_size and what we derived from rlimit
  if (max_size > 0) {
    _initial_thread_stack_size = MIN2(max_size, stack_size);
  } else {
    // Accept the rlimit max, but if stack is unlimited then it will be huge, so
    // clamp it at 8MB as we do on Solaris
    _initial_thread_stack_size = MIN2(stack_size, 8*M);
  }
  _initial_thread_stack_size = align_down(_initial_thread_stack_size, page_size());
  _initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size;

  assert(_initial_thread_stack_bottom < (address)stack_top, "overflow!");

  if (log_is_enabled(Info, os, thread)) {
    // See if we seem to be on primordial process thread
    bool primordial = uintptr_t(&rlim) > uintptr_t(_initial_thread_stack_bottom) &&
                      uintptr_t(&rlim) < stack_top;

    log_info(os, thread)("Capturing initial stack in %s thread: req. size: " SIZE_FORMAT "K, actual size: "
                         SIZE_FORMAT "K, top=" INTPTR_FORMAT ", bottom=" INTPTR_FORMAT,
                         primordial ? "primordial" : "user", max_size / K,  _initial_thread_stack_size / K,
                         stack_top, intptr_t(_initial_thread_stack_bottom));
  }
}

////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
double os::elapsedTime() {
  return ((double)os::elapsed_counter()) / os::elapsed_frequency(); // nanosecond resolution
}

jlong os::elapsed_counter() {
  return javaTimeNanos() - initial_time_count;
}

jlong os::elapsed_frequency() {
  return NANOSECS_PER_SEC; // nanosecond resolution
}

bool os::supports_vtime() { return true; }

double os::elapsedVTime() {
  struct rusage usage;
  int retval = getrusage(RUSAGE_THREAD, &usage);
  if (retval == 0) {
    return (double) (usage.ru_utime.tv_sec + usage.ru_stime.tv_sec) + (double) (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000 * 1000);
  } else {
    // better than nothing, but not much
    return elapsedTime();
  }
}

void os::Linux::fast_thread_clock_init() {
  if (!UseLinuxPosixThreadCPUClocks) {
    return;
  }
  clockid_t clockid;
  struct timespec tp;
  int (*pthread_getcpuclockid_func)(pthread_t, clockid_t *) =
      (int(*)(pthread_t, clockid_t *)) dlsym(RTLD_DEFAULT, "pthread_getcpuclockid");

  // Switch to using fast clocks for thread cpu time if
  // clock_getres() returns a 0 error code.
  // Note that some kernels may support the current thread
  // clock (CLOCK_THREAD_CPUTIME_ID) but not the clocks
  // returned by pthread_getcpuclockid().
  // If the fast POSIX clocks are supported then clock_getres()
  // must return at least tp.tv_sec == 0, which means a resolution
  // better than 1 sec. This is an extra check for reliability.

  if (pthread_getcpuclockid_func &&
      pthread_getcpuclockid_func(_main_thread, &clockid) == 0 &&
      clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) {
    _supports_fast_thread_cpu_time = true;
    _pthread_getcpuclockid = pthread_getcpuclockid_func;
  }
}
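
// Illustrative sketch of how the clock id saved above is meant to be used
// (the actual consumer is the thread CPU time code elsewhere in this file):
//   clockid_t cid;
//   if (_pthread_getcpuclockid(pthread_self(), &cid) == 0) {
//     struct timespec ts;
//     clock_gettime(cid, &ts);  // this thread's CPU time, ns resolution
//   }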

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    *process_real_time = ((double) real_ticks) / ticks_per_second;

    return true;
  }
}


char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
  return localtime_r(clock, res);
}

// thread_id is kernel thread id (similar to Solaris LWP id)
intx os::current_thread_id() { return os::Linux::gettid(); }
int os::current_process_id() {
  return ::getpid();
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory, a la java.io.tmpdir.
1389 const char* os::get_temp_directory() { return "/tmp"; }
1390 
1391 static bool file_exists(const char* filename) {
1392   struct stat statbuf;
1393   if (filename == NULL || strlen(filename) == 0) {
1394     return false;
1395   }
1396   return os::stat(filename, &statbuf) == 0;
1397 }
1398 
1399 // check if addr is inside libjvm.so
1400 bool os::address_is_in_vm(address addr) {
1401   static address libjvm_base_addr;
1402   Dl_info dlinfo;
1403 
1404   if (libjvm_base_addr == NULL) {
1405     if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1406       libjvm_base_addr = (address)dlinfo.dli_fbase;
1407     }
    assert(libjvm_base_addr != NULL, "Cannot obtain base address for libjvm");
1409   }
1410 
1411   if (dladdr((void *)addr, &dlinfo) != 0) {
1412     if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1413   }
1414 
1415   return false;
1416 }
1417 
1418 bool os::dll_address_to_function_name(address addr, char *buf,
1419                                       int buflen, int *offset,
1420                                       bool demangle) {
1421   // buf is not optional, but offset is optional
1422   assert(buf != NULL, "sanity check");
1423 
1424   Dl_info dlinfo;
1425 
1426   if (dladdr((void*)addr, &dlinfo) != 0) {
1427     // see if we have a matching symbol
1428     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1429       if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
1430         jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1431       }
1432       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1433       return true;
1434     }
1435     // no matching symbol so try for just file info
1436     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1437       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1438                           buf, buflen, offset, dlinfo.dli_fname, demangle)) {
1439         return true;
1440       }
1441     }
1442   }
1443 
1444   buf[0] = '\0';
1445   if (offset != NULL) *offset = -1;
1446   return false;
1447 }
1448 
1449 struct _address_to_library_name {
1450   address addr;          // input : memory address
1451   size_t  buflen;        //         size of fname
1452   char*   fname;         // output: library name
1453   address base;          //         library base addr
1454 };
1455 
1456 static int address_to_library_name_callback(struct dl_phdr_info *info,
1457                                             size_t size, void *data) {
1458   int i;
1459   bool found = false;
1460   address libbase = NULL;
1461   struct _address_to_library_name * d = (struct _address_to_library_name *)data;
1462 
1463   // iterate through all loadable segments
1464   for (i = 0; i < info->dlpi_phnum; i++) {
1465     address segbase = (address)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
1466     if (info->dlpi_phdr[i].p_type == PT_LOAD) {
1467       // base address of a library is the lowest address of its loaded
1468       // segments.
1469       if (libbase == NULL || libbase > segbase) {
1470         libbase = segbase;
1471       }
1472       // see if 'addr' is within current segment
1473       if (segbase <= d->addr &&
1474           d->addr < segbase + info->dlpi_phdr[i].p_memsz) {
1475         found = true;
1476       }
1477     }
1478   }
1479 
  // dlpi_name is NULL or empty if the ELF file is the executable; return 0
  // so dll_address_to_library_name() can fall through to dladdr(), which
  // can figure out the executable name from argv[0].
1483   if (found && info->dlpi_name && info->dlpi_name[0]) {
1484     d->base = libbase;
1485     if (d->fname) {
1486       jio_snprintf(d->fname, d->buflen, "%s", info->dlpi_name);
1487     }
1488     return 1;
1489   }
1490   return 0;
1491 }
1492 
1493 bool os::dll_address_to_library_name(address addr, char* buf,
1494                                      int buflen, int* offset) {
1495   // buf is not optional, but offset is optional
1496   assert(buf != NULL, "sanity check");
1497 
1498   Dl_info dlinfo;
1499   struct _address_to_library_name data;
1500 
  // Old glibc dladdr() implementations have a bug: they can resolve to the
  // wrong library name if the .so file has a base address != NULL. Here
  // we iterate through the program headers of all loaded libraries to find
  // out which library 'addr' really belongs to. This workaround can be
  // removed once the minimum glibc requirement is raised to 2.3.x.
1506   data.addr = addr;
1507   data.fname = buf;
1508   data.buflen = buflen;
1509   data.base = NULL;
1510   int rslt = dl_iterate_phdr(address_to_library_name_callback, (void *)&data);
1511 
1512   if (rslt) {
1513     // buf already contains library name
1514     if (offset) *offset = addr - data.base;
1515     return true;
1516   }
1517   if (dladdr((void*)addr, &dlinfo) != 0) {
1518     if (dlinfo.dli_fname != NULL) {
1519       jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1520     }
1521     if (dlinfo.dli_fbase != NULL && offset != NULL) {
1522       *offset = addr - (address)dlinfo.dli_fbase;
1523     }
1524     return true;
1525   }
1526 
1527   buf[0] = '\0';
1528   if (offset) *offset = -1;
1529   return false;
1530 }
1531 
// Loads a .dll/.so and, in case of error, checks whether the .dll/.so was
// built for the same architecture that HotSpot is running on.
1535 
1536 
// Remember the stack's state. The Linux dynamic linker will change
// the stack to 'executable' at most once, so we only need to safepoint once.
1539 bool os::Linux::_stack_is_executable = false;
1540 
1541 // VM operation that loads a library.  This is necessary if stack protection
1542 // of the Java stacks can be lost during loading the library.  If we
1543 // do not stop the Java threads, they can stack overflow before the stacks
1544 // are protected again.
1545 class VM_LinuxDllLoad: public VM_Operation {
1546  private:
1547   const char *_filename;
1548   char *_ebuf;
1549   int _ebuflen;
1550   void *_lib;
1551  public:
1552   VM_LinuxDllLoad(const char *fn, char *ebuf, int ebuflen) :
1553     _filename(fn), _ebuf(ebuf), _ebuflen(ebuflen), _lib(NULL) {}
1554   VMOp_Type type() const { return VMOp_LinuxDllLoad; }
1555   void doit() {
1556     _lib = os::Linux::dll_load_in_vmthread(_filename, _ebuf, _ebuflen);
1557     os::Linux::_stack_is_executable = true;
1558   }
1559   void* loaded_library() { return _lib; }
1560 };
1561 
1562 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1563   void * result = NULL;
1564   bool load_attempted = false;
1565 
1566   log_info(os)("attempting shared library load of %s", filename);
1567 
1568   // Check whether the library to load might change execution rights
1569   // of the stack. If they are changed, the protection of the stack
1570   // guard pages will be lost. We need a safepoint to fix this.
1571   //
1572   // See Linux man page execstack(8) for more info.
1573   if (os::uses_stack_guard_pages() && !os::Linux::_stack_is_executable) {
1574     if (!ElfFile::specifies_noexecstack(filename)) {
1575       if (!is_init_completed()) {
1576         os::Linux::_stack_is_executable = true;
1577         // This is OK - No Java threads have been created yet, and hence no
1578         // stack guard pages to fix.
1579         //
1580         // Dynamic loader will make all stacks executable after
1581         // this function returns, and will not do that again.
1582         assert(Threads::number_of_threads() == 0, "no Java threads should exist yet.");
1583       } else {
1584         warning("You have loaded library %s which might have disabled stack guard. "
1585                 "The VM will try to fix the stack guard now.\n"
1586                 "It's highly recommended that you fix the library with "
1587                 "'execstack -c <libfile>', or link it with '-z noexecstack'.",
1588                 filename);
1589 
1590         JavaThread *jt = JavaThread::current();
1591         if (jt->thread_state() != _thread_in_native) {
          // This happens when a compiler thread tries to load a hsdis-<arch>.so
          // file that requires an executable stack. We cannot enter a safepoint
          // here, so give up.
1594           warning("Unable to fix stack guard. Giving up.");
1595         } else {
1596           if (!LoadExecStackDllInVMThread) {
            // This is for the case where the DLL has a static
            // constructor function that executes JNI code. We cannot
            // load such DLLs in the VMThread.
1600             result = os::Linux::dlopen_helper(filename, ebuf, ebuflen);
1601           }
1602 
1603           ThreadInVMfromNative tiv(jt);
1604           debug_only(VMNativeEntryWrapper vew;)
1605 
1606           VM_LinuxDllLoad op(filename, ebuf, ebuflen);
1607           VMThread::execute(&op);
1608           if (LoadExecStackDllInVMThread) {
1609             result = op.loaded_library();
1610           }
1611           load_attempted = true;
1612         }
1613       }
1614     }
1615   }
1616 
1617   if (!load_attempted) {
1618     result = os::Linux::dlopen_helper(filename, ebuf, ebuflen);
1619   }
1620 
1621   if (result != NULL) {
1622     // Successful loading
1623     return result;
1624   }
1625 
1626   Elf32_Ehdr elf_head;
  int diag_msg_max_length = ebuflen - strlen(ebuf);
  char* diag_msg_buf = ebuf + strlen(ebuf);

  if (diag_msg_max_length == 0) {
    // No more space in ebuf for an additional diagnostic message
    return NULL;
  }

  int file_descriptor = ::open(filename, O_RDONLY | O_NONBLOCK);
1637 
1638   if (file_descriptor < 0) {
1639     // Can't open library, report dlerror() message
1640     return NULL;
1641   }
1642 
  bool failed_to_read_elf_head =
    (sizeof(elf_head) !=
     (::read(file_descriptor, &elf_head, sizeof(elf_head))));
1646 
1647   ::close(file_descriptor);
1648   if (failed_to_read_elf_head) {
1649     // file i/o error - report dlerror() msg
1650     return NULL;
1651   }
1652 
1653   if (elf_head.e_ident[EI_DATA] != LITTLE_ENDIAN_ONLY(ELFDATA2LSB) BIG_ENDIAN_ONLY(ELFDATA2MSB)) {
1654     // handle invalid/out of range endianness values
1655     if (elf_head.e_ident[EI_DATA] == 0 || elf_head.e_ident[EI_DATA] > 2) {
1656       return NULL;
1657     }
1658 
1659 #if defined(VM_LITTLE_ENDIAN)
1660     // VM is LE, shared object BE
1661     elf_head.e_machine = be16toh(elf_head.e_machine);
1662 #else
1663     // VM is BE, shared object LE
1664     elf_head.e_machine = le16toh(elf_head.e_machine);
1665 #endif
1666   }
1667 
  typedef struct {
    Elf32_Half    code;         // Actual value as defined in elf.h
    Elf32_Half    compat_class; // Compatibility class of the arch, from the VM's point of view
    unsigned char elf_class;    // 32 or 64 bit
    unsigned char endianness;   // MSB or LSB
    char*         name;         // String representation
  } arch_t;
1675 
1676 #ifndef EM_AARCH64
1677   #define EM_AARCH64    183               /* ARM AARCH64 */
1678 #endif
1679 #ifndef EM_RISCV
1680   #define EM_RISCV      243               /* RISC-V */
1681 #endif
1682 #ifndef EM_LOONGARCH
1683   #define EM_LOONGARCH  258               /* LoongArch */
1684 #endif
1685 
1686   static const arch_t arch_array[]={
1687     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1688     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1689     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1690     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1691     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1692     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1693     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1694     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1695 #if defined(VM_LITTLE_ENDIAN)
1696     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64 LE"},
1697     {EM_SH,          EM_SH,      ELFCLASS32, ELFDATA2LSB, (char*)"SuperH"},
1698 #else
1699     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1700     {EM_SH,          EM_SH,      ELFCLASS32, ELFDATA2MSB, (char*)"SuperH BE"},
1701 #endif
1702     {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM"},
1703     // we only support 64 bit z architecture
1704     {EM_S390,        EM_S390,    ELFCLASS64, ELFDATA2MSB, (char*)"IBM System/390"},
1705     {EM_ALPHA,       EM_ALPHA,   ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"},
1706     {EM_MIPS_RS3_LE, EM_MIPS_RS3_LE, ELFCLASS32, ELFDATA2LSB, (char*)"MIPSel"},
1707     {EM_MIPS,        EM_MIPS,    ELFCLASS32, ELFDATA2MSB, (char*)"MIPS"},
1708     {EM_PARISC,      EM_PARISC,  ELFCLASS32, ELFDATA2MSB, (char*)"PARISC"},
1709     {EM_68K,         EM_68K,     ELFCLASS32, ELFDATA2MSB, (char*)"M68k"},
1710     {EM_AARCH64,     EM_AARCH64, ELFCLASS64, ELFDATA2LSB, (char*)"AARCH64"},
1711     {EM_RISCV,       EM_RISCV,   ELFCLASS64, ELFDATA2LSB, (char*)"RISC-V"},
1712     {EM_LOONGARCH,   EM_LOONGARCH, ELFCLASS64, ELFDATA2LSB, (char*)"LoongArch"},
1713   };
1714 
1715 #if  (defined IA32)
1716   static  Elf32_Half running_arch_code=EM_386;
1717 #elif   (defined AMD64) || (defined X32)
1718   static  Elf32_Half running_arch_code=EM_X86_64;
1719 #elif  (defined IA64)
1720   static  Elf32_Half running_arch_code=EM_IA_64;
1721 #elif  (defined __sparc) && (defined _LP64)
1722   static  Elf32_Half running_arch_code=EM_SPARCV9;
1723 #elif  (defined __sparc) && (!defined _LP64)
1724   static  Elf32_Half running_arch_code=EM_SPARC;
1725 #elif  (defined __powerpc64__)
1726   static  Elf32_Half running_arch_code=EM_PPC64;
1727 #elif  (defined __powerpc__)
1728   static  Elf32_Half running_arch_code=EM_PPC;
1729 #elif  (defined AARCH64)
1730   static  Elf32_Half running_arch_code=EM_AARCH64;
1731 #elif  (defined ARM)
1732   static  Elf32_Half running_arch_code=EM_ARM;
1733 #elif  (defined S390)
1734   static  Elf32_Half running_arch_code=EM_S390;
1735 #elif  (defined ALPHA)
1736   static  Elf32_Half running_arch_code=EM_ALPHA;
1737 #elif  (defined MIPSEL)
1738   static  Elf32_Half running_arch_code=EM_MIPS_RS3_LE;
1739 #elif  (defined PARISC)
1740   static  Elf32_Half running_arch_code=EM_PARISC;
1741 #elif  (defined MIPS)
1742   static  Elf32_Half running_arch_code=EM_MIPS;
1743 #elif  (defined M68K)
1744   static  Elf32_Half running_arch_code=EM_68K;
1745 #elif  (defined SH)
1746   static  Elf32_Half running_arch_code=EM_SH;
1747 #elif  (defined RISCV)
1748   static  Elf32_Half running_arch_code=EM_RISCV;
1749 #elif  (defined LOONGARCH)
1750   static  Elf32_Half running_arch_code=EM_LOONGARCH;
1751 #else
1752     #error Method os::dll_load requires that one of following is defined:\
1753         AARCH64, ALPHA, ARM, AMD64, IA32, IA64, LOONGARCH, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, RISCV, S390, SH, __sparc
1754 #endif
1755 
1756   // Identify compatibility class for VM's architecture and library's architecture
1757   // Obtain string descriptions for architectures
1758 
  arch_t lib_arch = {elf_head.e_machine, 0, elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
  int running_arch_index = -1;

  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
    if (running_arch_code == arch_array[i].code) {
      running_arch_index    = i;
    }
    if (lib_arch.code == arch_array[i].code) {
      lib_arch.compat_class = arch_array[i].compat_class;
      lib_arch.name         = arch_array[i].name;
    }
  }
1771 
  assert(running_arch_index != -1,
         "Didn't find running architecture code (running_arch_code) in arch_array");
  if (running_arch_index == -1) {
    // Even though the running-architecture detection failed,
    // we may still continue and report the dlerror() message.
    return NULL;
  }
1778   }
1779 
1780   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1781     if (lib_arch.name != NULL) {
1782       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1783                  " (Possible cause: can't load %s .so on a %s platform)",
1784                  lib_arch.name, arch_array[running_arch_index].name);
1785     } else {
1786       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1787                  " (Possible cause: can't load this .so (machine code=0x%x) on a %s platform)",
1788                  lib_arch.code, arch_array[running_arch_index].name);
1789     }
1790     return NULL;
1791   }
1792 
1793   if (lib_arch.endianness != arch_array[running_arch_index].endianness) {
1794     ::snprintf(diag_msg_buf, diag_msg_max_length-1, " (Possible cause: endianness mismatch)");
1795     return NULL;
1796   }
1797 
1798   // ELF file class/capacity : 0 - invalid, 1 - 32bit, 2 - 64bit
1799   if (lib_arch.elf_class > 2 || lib_arch.elf_class < 1) {
1800     ::snprintf(diag_msg_buf, diag_msg_max_length-1, " (Possible cause: invalid ELF file class)");
1801     return NULL;
1802   }
1803 
1804   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1805     ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1806                " (Possible cause: architecture word width mismatch, can't load %d-bit .so on a %d-bit platform)",
1807                (int) lib_arch.elf_class * 32, arch_array[running_arch_index].elf_class * 32);
1808     return NULL;
1809   }
1810 
1811   return NULL;
1812 }
1813 
1814 void * os::Linux::dlopen_helper(const char *filename, char *ebuf,
1815                                 int ebuflen) {
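  // RTLD_LAZY defers the resolution of function symbols until they are first
  // used, which matches the JNI model of looking individual symbols up later
  // via dlsym().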
1816   void * result = ::dlopen(filename, RTLD_LAZY);
1817   if (result == NULL) {
1818     const char* error_report = ::dlerror();
1819     if (error_report == NULL) {
1820       error_report = "dlerror returned no error description";
1821     }
1822     if (ebuf != NULL && ebuflen > 0) {
1823       ::strncpy(ebuf, error_report, ebuflen-1);
1824       ebuf[ebuflen-1]='\0';
1825     }
1826     Events::log_dll_message(NULL, "Loading shared library %s failed, %s", filename, error_report);
1827     log_info(os)("shared library load of %s failed, %s", filename, error_report);
1828   } else {
1829     Events::log_dll_message(NULL, "Loaded shared library %s", filename);
1830     log_info(os)("shared library load of %s was successful", filename);
1831   }
1832   return result;
1833 }
1834 
1835 void * os::Linux::dll_load_in_vmthread(const char *filename, char *ebuf,
1836                                        int ebuflen) {
1837   void * result = NULL;
1838   if (LoadExecStackDllInVMThread) {
1839     result = dlopen_helper(filename, ebuf, ebuflen);
1840   }
1841 
1842   // Since 7019808, libjvm.so is linked with -noexecstack. If the VM loads a
1843   // library that requires an executable stack, or which does not have this
1844   // stack attribute set, dlopen changes the stack attribute to executable. The
1845   // read protection of the guard pages gets lost.
1846   //
1847   // Need to check _stack_is_executable again as multiple VM_LinuxDllLoad
1848   // may have been queued at the same time.
1849 
1850   if (!_stack_is_executable) {
1851     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
1852       StackOverflow* overflow_state = jt->stack_overflow_state();
1853       if (!overflow_state->stack_guard_zone_unused() &&     // Stack not yet fully initialized
1854           overflow_state->stack_guards_enabled()) {         // No pending stack overflow exceptions
1855         if (!os::guard_memory((char *)jt->stack_end(), StackOverflow::stack_guard_zone_size())) {
1856           warning("Attempt to reguard stack yellow zone failed.");
1857         }
1858       }
1859     }
1860   }
1861 
1862   return result;
1863 }
1864 
1865 const char* os::Linux::dll_path(void* lib) {
1866   struct link_map *lmap;
1867   const char* l_path = NULL;
1868   assert(lib != NULL, "dll_path parameter must not be NULL");
1869 
1870   int res_dli = ::dlinfo(lib, RTLD_DI_LINKMAP, &lmap);
1871   if (res_dli == 0) {
1872     l_path = lmap->l_name;
1873   }
1874   return l_path;
1875 }
1876 
1877 static bool _print_ascii_file(const char* filename, outputStream* st, const char* hdr = NULL) {
1878   int fd = ::open(filename, O_RDONLY);
1879   if (fd == -1) {
1880     return false;
1881   }
1882 
1883   if (hdr != NULL) {
1884     st->print_cr("%s", hdr);
1885   }
1886 
1887   char buf[33];
1888   int bytes;
1889   buf[32] = '\0';
1890   while ((bytes = ::read(fd, buf, sizeof(buf)-1)) > 0) {
1891     st->print_raw(buf, bytes);
1892   }
1893 
1894   ::close(fd);
1895 
1896   return true;
1897 }
1898 
1899 static void _print_ascii_file_h(const char* header, const char* filename, outputStream* st, bool same_line = true) {
1900   st->print("%s:%c", header, same_line ? ' ' : '\n');
1901   if (!_print_ascii_file(filename, st)) {
1902     st->print_cr("<Not Available>");
1903   }
1904 }
1905 
1906 void os::print_dll_info(outputStream *st) {
1907   st->print_cr("Dynamic libraries:");
1908 
1909   char fname[32];
1910   pid_t pid = os::Linux::gettid();
1911 
1912   jio_snprintf(fname, sizeof(fname), "/proc/%d/maps", pid);
1913 
1914   if (!_print_ascii_file(fname, st)) {
1915     st->print_cr("Can not get library information for pid = %d", pid);
1916   }
1917 }
1918 
1919 struct loaded_modules_info_param {
1920   os::LoadedModulesCallbackFunc callback;
1921   void *param;
1922 };
1923 
1924 static int dl_iterate_callback(struct dl_phdr_info *info, size_t size, void *data) {
1925   if ((info->dlpi_name == NULL) || (*info->dlpi_name == '\0')) {
1926     return 0;
1927   }
1928 
1929   struct loaded_modules_info_param *callback_param = reinterpret_cast<struct loaded_modules_info_param *>(data);
1930   address base = NULL;
1931   address top = NULL;
1932   for (int idx = 0; idx < info->dlpi_phnum; idx++) {
1933     const ElfW(Phdr) *phdr = info->dlpi_phdr + idx;
1934     if (phdr->p_type == PT_LOAD) {
1935       address raw_phdr_base = reinterpret_cast<address>(info->dlpi_addr + phdr->p_vaddr);
1936 
1937       address phdr_base = align_down(raw_phdr_base, phdr->p_align);
1938       if ((base == NULL) || (base > phdr_base)) {
1939         base = phdr_base;
1940       }
1941 
1942       address phdr_top = align_up(raw_phdr_base + phdr->p_memsz, phdr->p_align);
1943       if ((top == NULL) || (top < phdr_top)) {
1944         top = phdr_top;
1945       }
1946     }
1947   }
1948 
1949   return callback_param->callback(info->dlpi_name, base, top, callback_param->param);
1950 }
1951 
1952 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1953   struct loaded_modules_info_param callback_param = {callback, param};
1954   return dl_iterate_phdr(&dl_iterate_callback, &callback_param);
1955 }
1956 
1957 void os::print_os_info_brief(outputStream* st) {
1958   os::Linux::print_distro_info(st);
1959 
1960   os::Posix::print_uname_info(st);
1961 
1962   os::Linux::print_libversion_info(st);
1963 
1964 }
1965 
1966 void os::print_os_info(outputStream* st) {
1967   st->print_cr("OS:");
1968 
1969   os::Linux::print_distro_info(st);
1970 
1971   os::Posix::print_uname_info(st);
1972 
1973   os::Linux::print_uptime_info(st);
1974 
1975   // Print warning if unsafe chroot environment detected
1976   if (unsafe_chroot_detected) {
1977     st->print_cr("WARNING!! %s", unstable_chroot_error);
1978   }
1979 
1980   os::Linux::print_libversion_info(st);
1981 
1982   os::Posix::print_rlimit_info(st);
1983 
1984   os::Posix::print_load_average(st);
1985   st->cr();
1986 
1987   os::Linux::print_system_memory_info(st);
1988   st->cr();
1989 
1990   os::Linux::print_process_memory_info(st);
1991   st->cr();
1992 
1993   os::Linux::print_proc_sys_info(st);
1994   st->cr();
1995 
1996   if (os::Linux::print_ld_preload_file(st)) {
1997     st->cr();
1998   }
1999 
2000   if (os::Linux::print_container_info(st)) {
2001     st->cr();
2002   }
2003 
2004   VM_Version::print_platform_virtualization_info(st);
2005 
2006   os::Linux::print_steal_info(st);
2007 }
2008 
// Try to identify popular distros.
// Most Linux distributions have a /etc/XXX-release file, which contains
// the OS version string. Newer Linux distributions have a /etc/lsb-release
// file that also contains the OS version string. Some have more than one
// /etc/XXX-release file (e.g. Mandrake has both /etc/mandrake-release and
// /etc/redhat-release), so the order is important.
// Any Linux that is based on Red Hat (i.e. Oracle, Mandrake, Sun JDS...) has
// its own specific XXX-release file as well as a redhat-release file.
// Because of this, the XXX-release file needs to be searched for before the
// redhat-release file.
// Since Red Hat and SuSE have an lsb-release file that is not very descriptive,
// the search for redhat-release / SuSE-release needs to come before lsb-release.
// Since the lsb-release file is the newer standard, it needs to be searched
// before the older-style release files.
// Searching system-release (Red Hat) and os-release (other Linuxes) is the
// next-to-last resort. The os-release file is a new standard that contains
// distribution information, while the system-release file seems to be an old
// standard that has been replaced by the lsb-release and os-release files.
// Searching for the debian_version file is the last resort. It contains
// an informative string like "6.0.6" or "wheezy/sid". Because of this,
// "Debian " is printed before the contents of the debian_version file.
2030 
2031 const char* distro_files[] = {
2032   "/etc/oracle-release",
2033   "/etc/mandriva-release",
2034   "/etc/mandrake-release",
2035   "/etc/sun-release",
2036   "/etc/redhat-release",
2037   "/etc/SuSE-release",
2038   "/etc/lsb-release",
2039   "/etc/turbolinux-release",
2040   "/etc/gentoo-release",
2041   "/etc/ltib-release",
2042   "/etc/angstrom-version",
2043   "/etc/system-release",
2044   "/etc/os-release",
2045   NULL };
2046 
2047 void os::Linux::print_distro_info(outputStream* st) {
2048   for (int i = 0;; i++) {
2049     const char* file = distro_files[i];
2050     if (file == NULL) {
2051       break;  // done
2052     }
2053     // If file prints, we found it.
2054     if (_print_ascii_file(file, st)) {
2055       return;
2056     }
2057   }
2058 
2059   if (file_exists("/etc/debian_version")) {
2060     st->print("Debian ");
2061     _print_ascii_file("/etc/debian_version", st);
2062   } else {
2063     st->print_cr("Linux");
2064   }
2065 }
2066 
2067 static void parse_os_info_helper(FILE* fp, char* distro, size_t length, bool get_first_line) {
2068   char buf[256];
2069   while (fgets(buf, sizeof(buf), fp)) {
    // Extract the distro name from lines in the expected formats
    if (strstr(buf, "DISTRIB_DESCRIPTION=") != NULL || strstr(buf, "PRETTY_NAME=") != NULL) {
      char* ptr = strstr(buf, "\"");  // the name is in quotes
      if (ptr != NULL) {
        ptr++; // go beyond the first quote
        char* nl = strchr(ptr, '\"');
        if (nl != NULL) *nl = '\0';
        strncpy(distro, ptr, length);
      } else {
        ptr = strstr(buf, "=");
        ptr++; // go beyond the equals sign
        char* nl = strchr(ptr, '\n');
        if (nl != NULL) *nl = '\0';
        strncpy(distro, ptr, length);
      }
2085       return;
2086     } else if (get_first_line) {
2087       char* nl = strchr(buf, '\n');
2088       if (nl != NULL) *nl = '\0';
2089       strncpy(distro, buf, length);
2090       return;
2091     }
2092   }
  // no matching line found: fall back to the last line read
2094   char* nl = strchr(buf, '\n');
2095   if (nl != NULL) *nl = '\0';
2096   strncpy(distro, buf, length);
2097 }
2098 
2099 static void parse_os_info(char* distro, size_t length, const char* file) {
2100   FILE* fp = fopen(file, "r");
2101   if (fp != NULL) {
    // if SuSE format, use the first line
2103     bool get_first_line = (strcmp(file, "/etc/SuSE-release") == 0);
2104     parse_os_info_helper(fp, distro, length, get_first_line);
2105     fclose(fp);
2106   }
2107 }
2108 
2109 void os::get_summary_os_info(char* buf, size_t buflen) {
2110   for (int i = 0;; i++) {
2111     const char* file = distro_files[i];
2112     if (file == NULL) {
2113       break; // ran out of distro_files
2114     }
2115     if (file_exists(file)) {
2116       parse_os_info(buf, buflen, file);
2117       return;
2118     }
2119   }
2120   // special case for debian
2121   if (file_exists("/etc/debian_version")) {
2122     strncpy(buf, "Debian ", buflen);
2123     if (buflen > 7) {
2124       parse_os_info(&buf[7], buflen-7, "/etc/debian_version");
2125     }
2126   } else {
2127     strncpy(buf, "Linux", buflen);
2128   }
2129 }
2130 
2131 void os::Linux::print_libversion_info(outputStream* st) {
2132   // libc, pthread
2133   st->print("libc: ");
2134   st->print("%s ", os::Linux::libc_version());
2135   st->print("%s ", os::Linux::libpthread_version());
2136   st->cr();
2137 }
2138 
2139 void os::Linux::print_proc_sys_info(outputStream* st) {
2140   _print_ascii_file_h("/proc/sys/kernel/threads-max (system-wide limit on the number of threads)",
2141                       "/proc/sys/kernel/threads-max", st);
2142   _print_ascii_file_h("/proc/sys/vm/max_map_count (maximum number of memory map areas a process may have)",
2143                       "/proc/sys/vm/max_map_count", st);
2144   _print_ascii_file_h("/proc/sys/kernel/pid_max (system-wide limit on number of process identifiers)",
2145                       "/proc/sys/kernel/pid_max", st);
2146 }
2147 
2148 void os::Linux::print_system_memory_info(outputStream* st) {
2149   _print_ascii_file_h("/proc/meminfo", "/proc/meminfo", st, false);
2150   st->cr();
2151 
2152   // some information regarding THPs; for details see
2153   // https://www.kernel.org/doc/Documentation/vm/transhuge.txt
2154   _print_ascii_file_h("/sys/kernel/mm/transparent_hugepage/enabled",
2155                       "/sys/kernel/mm/transparent_hugepage/enabled", st);
2156   _print_ascii_file_h("/sys/kernel/mm/transparent_hugepage/defrag (defrag/compaction efforts parameter)",
2157                       "/sys/kernel/mm/transparent_hugepage/defrag", st);
2158 }
2159 
2160 bool os::Linux::query_process_memory_info(os::Linux::meminfo_t* info) {
2161   FILE* f = ::fopen("/proc/self/status", "r");
2162   const int num_values = sizeof(os::Linux::meminfo_t) / sizeof(size_t);
2163   int num_found = 0;
2164   char buf[256];
2165   info->vmsize = info->vmpeak = info->vmrss = info->vmhwm = info->vmswap =
2166       info->rssanon = info->rssfile = info->rssshmem = -1;
2167   if (f != NULL) {
2168     while (::fgets(buf, sizeof(buf), f) != NULL && num_found < num_values) {
2169       if ( (info->vmsize == -1    && sscanf(buf, "VmSize: " SSIZE_FORMAT " kB", &info->vmsize) == 1) ||
2170            (info->vmpeak == -1    && sscanf(buf, "VmPeak: " SSIZE_FORMAT " kB", &info->vmpeak) == 1) ||
2171            (info->vmswap == -1    && sscanf(buf, "VmSwap: " SSIZE_FORMAT " kB", &info->vmswap) == 1) ||
2172            (info->vmhwm == -1     && sscanf(buf, "VmHWM: " SSIZE_FORMAT " kB", &info->vmhwm) == 1) ||
2173            (info->vmrss == -1     && sscanf(buf, "VmRSS: " SSIZE_FORMAT " kB", &info->vmrss) == 1) ||
2174            (info->rssanon == -1   && sscanf(buf, "RssAnon: " SSIZE_FORMAT " kB", &info->rssanon) == 1) || // Needs Linux 4.5
2175            (info->rssfile == -1   && sscanf(buf, "RssFile: " SSIZE_FORMAT " kB", &info->rssfile) == 1) || // Needs Linux 4.5
2176            (info->rssshmem == -1  && sscanf(buf, "RssShmem: " SSIZE_FORMAT " kB", &info->rssshmem) == 1)  // Needs Linux 4.5
2177            )
2178       {
        num_found++;
2180       }
2181     }
2182     fclose(f);
2183     return true;
2184   }
2185   return false;
2186 }
2187 
2188 #ifdef __GLIBC__
// For glibc, print a one-liner with the malloc tunables.
// The most important and popular one is MALLOC_ARENA_MAX, but for
// thoroughness we print them all.
2192 static void print_glibc_malloc_tunables(outputStream* st) {
2193   static const char* var[] = {
2194       // the new variant
2195       "GLIBC_TUNABLES",
2196       // legacy variants
2197       "MALLOC_CHECK_", "MALLOC_TOP_PAD_", "MALLOC_PERTURB_",
2198       "MALLOC_MMAP_THRESHOLD_", "MALLOC_TRIM_THRESHOLD_",
2199       "MALLOC_MMAP_MAX_", "MALLOC_ARENA_TEST", "MALLOC_ARENA_MAX",
2200       NULL};
2201   st->print("glibc malloc tunables: ");
2202   bool printed = false;
  for (int i = 0; var[i] != NULL; i++) {
2204     const char* const val = ::getenv(var[i]);
2205     if (val != NULL) {
2206       st->print("%s%s=%s", (printed ? ", " : ""), var[i], val);
2207       printed = true;
2208     }
2209   }
2210   if (!printed) {
2211     st->print("(default)");
2212   }
2213 }
2214 #endif // __GLIBC__
2215 
2216 void os::Linux::print_process_memory_info(outputStream* st) {
2217 
2218   st->print_cr("Process Memory:");
2219 
  // Print virtual and resident set size, the peak values, swap, and, if the
  //  kernel is recent enough, the components of rss.
2222   meminfo_t info;
2223   if (query_process_memory_info(&info)) {
2224     st->print_cr("Virtual Size: " SSIZE_FORMAT "K (peak: " SSIZE_FORMAT "K)", info.vmsize, info.vmpeak);
2225     st->print("Resident Set Size: " SSIZE_FORMAT "K (peak: " SSIZE_FORMAT "K)", info.vmrss, info.vmhwm);
2226     if (info.rssanon != -1) { // requires kernel >= 4.5
2227       st->print(" (anon: " SSIZE_FORMAT "K, file: " SSIZE_FORMAT "K, shmem: " SSIZE_FORMAT "K)",
2228                 info.rssanon, info.rssfile, info.rssshmem);
2229     }
2230     st->cr();
2231     if (info.vmswap != -1) { // requires kernel >= 2.6.34
2232       st->print_cr("Swapped out: " SSIZE_FORMAT "K", info.vmswap);
2233     }
2234   } else {
2235     st->print_cr("Could not open /proc/self/status to get process memory related information");
2236   }
2237 
2238   // glibc only:
2239   // - Print outstanding allocations using mallinfo
2240   // - Print glibc tunables
2241 #ifdef __GLIBC__
2242   size_t total_allocated = 0;
2243   size_t free_retained = 0;
2244   bool might_have_wrapped = false;
2245   if (_mallinfo2 != NULL) {
2246     struct glibc_mallinfo2 mi = _mallinfo2();
2247     total_allocated = mi.uordblks + mi.hblkhd;
2248     free_retained = mi.fordblks;
2249   } else if (_mallinfo != NULL) {
    // mallinfo is an old API. Its member names mean next to nothing and, beyond
    // that, the members are 32-bit signed. So for larger footprints the values
    // may have wrapped around. We try to detect this here: if the process's
    // whole resident set size is smaller than 4G, the malloc footprint has to
    // be less than that and the numbers are reliable.
2254     struct glibc_mallinfo mi = _mallinfo();
2255     total_allocated = (size_t)(unsigned)mi.uordblks + (size_t)(unsigned)mi.hblkhd;
2256     free_retained = (size_t)(unsigned)mi.fordblks;
2257     // Since mallinfo members are int, glibc values may have wrapped. Warn about this.
2258     might_have_wrapped = (info.vmrss * K) > UINT_MAX && (info.vmrss * K) > (total_allocated + UINT_MAX);
2259   }
2260   if (_mallinfo2 != NULL || _mallinfo != NULL) {
2261     st->print_cr("C-Heap outstanding allocations: " SIZE_FORMAT "K, retained: " SIZE_FORMAT "K%s",
2262                  total_allocated / K, free_retained / K,
2263                  might_have_wrapped ? " (may have wrapped)" : "");
2264   }
2265   // Tunables
2266   print_glibc_malloc_tunables(st);
2267   st->cr();
2268 #endif
2269 }
2270 
2271 bool os::Linux::print_ld_preload_file(outputStream* st) {
2272   return _print_ascii_file("/etc/ld.so.preload", st, "/etc/ld.so.preload:");
2273 }
2274 
2275 void os::Linux::print_uptime_info(outputStream* st) {
2276   struct sysinfo sinfo;
2277   int ret = sysinfo(&sinfo);
2278   if (ret == 0) {
2279     os::print_dhm(st, "OS uptime:", (long) sinfo.uptime);
2280   }
2281 }
2282 
2283 bool os::Linux::print_container_info(outputStream* st) {
2284   if (!OSContainer::is_containerized()) {
2285     st->print_cr("container information not found.");
2286     return false;
2287   }
2288 
2289   st->print_cr("container (cgroup) information:");
2290 
2291   const char *p_ct = OSContainer::container_type();
2292   st->print_cr("container_type: %s", p_ct != NULL ? p_ct : "not supported");
2293 
2294   char *p = OSContainer::cpu_cpuset_cpus();
2295   st->print_cr("cpu_cpuset_cpus: %s", p != NULL ? p : "not supported");
2296   free(p);
2297 
2298   p = OSContainer::cpu_cpuset_memory_nodes();
2299   st->print_cr("cpu_memory_nodes: %s", p != NULL ? p : "not supported");
2300   free(p);
2301 
2302   int i = OSContainer::active_processor_count();
2303   st->print("active_processor_count: ");
2304   if (i > 0) {
2305     if (ActiveProcessorCount > 0) {
2306       st->print_cr("%d, but overridden by -XX:ActiveProcessorCount %d", i, ActiveProcessorCount);
2307     } else {
2308       st->print_cr("%d", i);
2309     }
2310   } else {
2311     st->print_cr("not supported");
2312   }
2313 
2314   i = OSContainer::cpu_quota();
2315   st->print("cpu_quota: ");
2316   if (i > 0) {
2317     st->print_cr("%d", i);
2318   } else {
2319     st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no quota");
2320   }
2321 
2322   i = OSContainer::cpu_period();
2323   st->print("cpu_period: ");
2324   if (i > 0) {
2325     st->print_cr("%d", i);
2326   } else {
2327     st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no period");
2328   }
2329 
2330   i = OSContainer::cpu_shares();
2331   st->print("cpu_shares: ");
2332   if (i > 0) {
2333     st->print_cr("%d", i);
2334   } else {
2335     st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no shares");
2336   }
2337 
2338   OSContainer::print_container_helper(st, OSContainer::memory_limit_in_bytes(), "memory_limit_in_bytes");
2339   OSContainer::print_container_helper(st, OSContainer::memory_and_swap_limit_in_bytes(), "memory_and_swap_limit_in_bytes");
2340   OSContainer::print_container_helper(st, OSContainer::memory_soft_limit_in_bytes(), "memory_soft_limit_in_bytes");
2341   OSContainer::print_container_helper(st, OSContainer::memory_usage_in_bytes(), "memory_usage_in_bytes");
2342   OSContainer::print_container_helper(st, OSContainer::memory_max_usage_in_bytes(), "memory_max_usage_in_bytes");
2343 
2344   OSContainer::print_version_specific_info(st);
2345 
2346   jlong j = OSContainer::pids_max();
2347   st->print("maximum number of tasks: ");
2348   if (j > 0) {
2349     st->print_cr(JLONG_FORMAT, j);
2350   } else {
2351     st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "unlimited");
2352   }
2353 
2354   j = OSContainer::pids_current();
2355   st->print("current number of tasks: ");
2356   if (j > 0) {
2357     st->print_cr(JLONG_FORMAT, j);
2358   } else {
2359     if (j == OSCONTAINER_ERROR) {
2360       st->print_cr("not supported");
2361     }
2362   }
2363 
2364   return true;
2365 }
2366 
2367 void os::Linux::print_steal_info(outputStream* st) {
2368   if (has_initial_tick_info) {
2369     CPUPerfTicks pticks;
2370     bool res = os::Linux::get_tick_information(&pticks, -1);
2371 
2372     if (res && pticks.has_steal_ticks) {
2373       uint64_t steal_ticks_difference = pticks.steal - initial_steal_ticks;
2374       uint64_t total_ticks_difference = pticks.total - initial_total_ticks;
2375       double steal_ticks_perc = 0.0;
2376       if (total_ticks_difference != 0) {
2377         steal_ticks_perc = (double) steal_ticks_difference / total_ticks_difference;
2378       }
2379       st->print_cr("Steal ticks since vm start: " UINT64_FORMAT, steal_ticks_difference);
2380       st->print_cr("Steal ticks percentage since vm start:%7.3f", steal_ticks_perc);
2381     }
2382   }
2383 }
2384 
2385 void os::print_memory_info(outputStream* st) {
2386 
2387   st->print("Memory:");
2388   st->print(" %dk page", os::vm_page_size()>>10);
2389 
2390   // values in struct sysinfo are "unsigned long"
2391   struct sysinfo si;
2392   sysinfo(&si);
2393 
2394   st->print(", physical " UINT64_FORMAT "k",
2395             os::physical_memory() >> 10);
2396   st->print("(" UINT64_FORMAT "k free)",
2397             os::available_memory() >> 10);
2398   st->print(", swap " UINT64_FORMAT "k",
2399             ((jlong)si.totalswap * si.mem_unit) >> 10);
2400   st->print("(" UINT64_FORMAT "k free)",
2401             ((jlong)si.freeswap * si.mem_unit) >> 10);
2402   st->cr();
2403   st->print("Page Sizes: ");
2404   _page_sizes.print_on(st);
2405   st->cr();
2406 }
2407 
2408 // Print the first "model name" line and the first "flags" line
2409 // that we find and nothing more. We assume "model name" comes
2410 // before "flags" so if we find a second "model name", then the
2411 // "flags" field is considered missing.
2412 static bool print_model_name_and_flags(outputStream* st, char* buf, size_t buflen) {
2413 #if defined(IA32) || defined(AMD64)
2414   // Other platforms have less repetitive cpuinfo files
2415   FILE *fp = fopen("/proc/cpuinfo", "r");
2416   if (fp) {
2417     bool model_name_printed = false;
2418     while (!feof(fp)) {
2419       if (fgets(buf, buflen, fp)) {
2420         // Assume model name comes before flags
2421         if (strstr(buf, "model name") != NULL) {
2422           if (!model_name_printed) {
2423             st->print_raw("CPU Model and flags from /proc/cpuinfo:\n");
2424             st->print_raw(buf);
2425             model_name_printed = true;
2426           } else {
2427             // model name printed but not flags?  Odd, just return
2428             fclose(fp);
2429             return true;
2430           }
2431         }
2432         // print the flags line too
2433         if (strstr(buf, "flags") != NULL) {
2434           st->print_raw(buf);
2435           fclose(fp);
2436           return true;
2437         }
2438       }
2439     }
2440     fclose(fp);
2441   }
2442 #endif // x86 platforms
2443   return false;
2444 }
2445 
2446 // additional information about CPU e.g. available frequency ranges
2447 static void print_sys_devices_cpu_info(outputStream* st, char* buf, size_t buflen) {
2448   _print_ascii_file_h("Online cpus", "/sys/devices/system/cpu/online", st);
2449   _print_ascii_file_h("Offline cpus", "/sys/devices/system/cpu/offline", st);
2450 
2451   if (ExtensiveErrorReports) {
2452     // cache related info (cpu 0, should be similar for other CPUs)
2453     for (unsigned int i=0; i < 10; i++) { // handle max. 10 cache entries
2454       char hbuf_level[60];
2455       char hbuf_type[60];
2456       char hbuf_size[60];
2457       char hbuf_coherency_line_size[80];
2458       snprintf(hbuf_level, 60, "/sys/devices/system/cpu/cpu0/cache/index%u/level", i);
2459       snprintf(hbuf_type, 60, "/sys/devices/system/cpu/cpu0/cache/index%u/type", i);
2460       snprintf(hbuf_size, 60, "/sys/devices/system/cpu/cpu0/cache/index%u/size", i);
2461       snprintf(hbuf_coherency_line_size, 80, "/sys/devices/system/cpu/cpu0/cache/index%u/coherency_line_size", i);
2462       if (file_exists(hbuf_level)) {
2463         _print_ascii_file_h("cache level", hbuf_level, st);
2464         _print_ascii_file_h("cache type", hbuf_type, st);
2465         _print_ascii_file_h("cache size", hbuf_size, st);
2466         _print_ascii_file_h("cache coherency line size", hbuf_coherency_line_size, st);
2467       }
2468     }
2469   }
2470 
2471   // we miss the cpufreq entries on Power and s390x
2472 #if defined(IA32) || defined(AMD64)
2473   _print_ascii_file_h("BIOS frequency limitation", "/sys/devices/system/cpu/cpu0/cpufreq/bios_limit", st);
2474   _print_ascii_file_h("Frequency switch latency (ns)", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_transition_latency", st);
2475   _print_ascii_file_h("Available cpu frequencies", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies", st);
2476   // min and max should be in the Available range but still print them (not all info might be available for all kernels)
2477   if (ExtensiveErrorReports) {
2478     _print_ascii_file_h("Maximum cpu frequency", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", st);
2479     _print_ascii_file_h("Minimum cpu frequency", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_min_freq", st);
2480     _print_ascii_file_h("Current cpu frequency", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq", st);
2481   }
2482   // governors are power schemes, see https://wiki.archlinux.org/index.php/CPU_frequency_scaling
2483   if (ExtensiveErrorReports) {
2484     _print_ascii_file_h("Available governors", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors", st);
2485   }
2486   _print_ascii_file_h("Current governor", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", st);
2487   // Core performance boost, see https://www.kernel.org/doc/Documentation/cpu-freq/boost.txt
2488   // Raise operating frequency of some cores in a multi-core package if certain conditions apply, e.g.
2489   // whole chip is not fully utilized
2490   _print_ascii_file_h("Core performance/turbo boost", "/sys/devices/system/cpu/cpufreq/boost", st);
2491 #endif
2492 }
2493 
2494 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
2495   // Only print the model name if the platform provides this as a summary
2496   if (!print_model_name_and_flags(st, buf, buflen)) {
2497     _print_ascii_file_h("/proc/cpuinfo", "/proc/cpuinfo", st, false);
2498   }
2499   st->cr();
2500   print_sys_devices_cpu_info(st, buf, buflen);
2501 }
2502 
2503 #if defined(AMD64) || defined(IA32) || defined(X32)
2504 const char* search_string = "model name";
2505 #elif defined(M68K)
2506 const char* search_string = "CPU";
2507 #elif defined(PPC64)
2508 const char* search_string = "cpu";
2509 #elif defined(S390)
2510 const char* search_string = "machine =";
2511 #elif defined(SPARC)
2512 const char* search_string = "cpu";
2513 #else
2514 const char* search_string = "Processor";
2515 #endif
2516 
2517 // Parses the cpuinfo file for string representing the model name.
2518 void os::get_summary_cpu_info(char* cpuinfo, size_t length) {
2519   FILE* fp = fopen("/proc/cpuinfo", "r");
2520   if (fp != NULL) {
2521     while (!feof(fp)) {
2522       char buf[256];
2523       if (fgets(buf, sizeof(buf), fp)) {
2524         char* start = strstr(buf, search_string);
2525         if (start != NULL) {
2526           char *ptr = start + strlen(search_string);
2527           char *end = buf + strlen(buf);
2528           while (ptr != end) {
2529              // skip whitespace and colon for the rest of the name.
2530              if (*ptr != ' ' && *ptr != '\t' && *ptr != ':') {
2531                break;
2532              }
2533              ptr++;
2534           }
2535           if (ptr != end) {
2536             // reasonable string, get rid of newline and keep the rest
2537             char* nl = strchr(buf, '\n');
2538             if (nl != NULL) *nl = '\0';
2539             strncpy(cpuinfo, ptr, length);
2540             fclose(fp);
2541             return;
2542           }
2543         }
2544       }
2545     }
2546     fclose(fp);
2547   }
2548   // cpuinfo not found or parsing failed, just print generic string.  The entire
2549   // /proc/cpuinfo file will be printed later in the file (or enough of it for x86)
2550 #if   defined(AARCH64)
2551   strncpy(cpuinfo, "AArch64", length);
2552 #elif defined(AMD64)
2553   strncpy(cpuinfo, "x86_64", length);
2554 #elif defined(ARM)  // Order wrt. AARCH64 is relevant!
2555   strncpy(cpuinfo, "ARM", length);
2556 #elif defined(IA32)
2557   strncpy(cpuinfo, "x86_32", length);
2558 #elif defined(IA64)
2559   strncpy(cpuinfo, "IA64", length);
2560 #elif defined(PPC)
2561   strncpy(cpuinfo, "PPC64", length);
2562 #elif defined(RISCV)
2563   strncpy(cpuinfo, "RISCV64", length);
2564 #elif defined(S390)
2565   strncpy(cpuinfo, "S390", length);
2566 #elif defined(SPARC)
2567   strncpy(cpuinfo, "sparcv9", length);
2568 #elif defined(ZERO_LIBARCH)
2569   strncpy(cpuinfo, ZERO_LIBARCH, length);
2570 #else
2571   strncpy(cpuinfo, "unknown", length);
2572 #endif
2573 }
2574 
2575 static char saved_jvm_path[MAXPATHLEN] = {0};
2576 
2577 // Find the full path to the current module, libjvm.so
2578 void os::jvm_path(char *buf, jint buflen) {
2579   // Error checking.
2580   if (buflen < MAXPATHLEN) {
2581     assert(false, "must use a large-enough buffer");
2582     buf[0] = '\0';
2583     return;
2584   }
2585   // Lazy resolve the path to current module.
2586   if (saved_jvm_path[0] != 0) {
2587     strcpy(buf, saved_jvm_path);
2588     return;
2589   }
2590 
2591   char dli_fname[MAXPATHLEN];
2592   dli_fname[0] = '\0';
2593   bool ret = dll_address_to_library_name(
2594                                          CAST_FROM_FN_PTR(address, os::jvm_path),
2595                                          dli_fname, sizeof(dli_fname), NULL);
2596   assert(ret, "cannot locate libjvm");
2597   char *rp = NULL;
2598   if (ret && dli_fname[0] != '\0') {
2599     rp = os::Posix::realpath(dli_fname, buf, buflen);
2600   }
2601   if (rp == NULL) {
2602     return;
2603   }
2604 
2605   if (Arguments::sun_java_launcher_is_altjvm()) {
2606     // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
2607     // value for buf is "<JAVA_HOME>/jre/lib/<vmtype>/libjvm.so".
2608     // If "/jre/lib/" appears at the right place in the string, then
2609     // assume we are installed in a JDK and we're done. Otherwise, check
2610     // for a JAVA_HOME environment variable and fix up the path so it
2611     // looks like libjvm.so is installed there (append a fake suffix
2612     // hotspot/libjvm.so).
2613     const char *p = buf + strlen(buf) - 1;
2614     for (int count = 0; p > buf && count < 5; ++count) {
2615       for (--p; p > buf && *p != '/'; --p)
2616         /* empty */ ;
2617     }
2618 
2619     if (strncmp(p, "/jre/lib/", 9) != 0) {
2620       // Look for JAVA_HOME in the environment.
2621       char* java_home_var = ::getenv("JAVA_HOME");
2622       if (java_home_var != NULL && java_home_var[0] != 0) {
2623         char* jrelib_p;
2624         int len;
2625 
2626         // Check the current module name "libjvm.so".
2627         p = strrchr(buf, '/');
2628         if (p == NULL) {
2629           return;
2630         }
2631         assert(strstr(p, "/libjvm") == p, "invalid library name");
2632 
2633         rp = os::Posix::realpath(java_home_var, buf, buflen);
2634         if (rp == NULL) {
2635           return;
2636         }
2637 
2638         // determine if this is a legacy image or modules image
2639         // modules image doesn't have "jre" subdirectory
2640         len = strlen(buf);
2641         assert(len < buflen, "Ran out of buffer room");
2642         jrelib_p = buf + len;
2643         snprintf(jrelib_p, buflen-len, "/jre/lib");
2644         if (0 != access(buf, F_OK)) {
2645           snprintf(jrelib_p, buflen-len, "/lib");
2646         }
2647 
2648         if (0 == access(buf, F_OK)) {
2649           // Use current module name "libjvm.so"
2650           len = strlen(buf);
2651           snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
2652         } else {
2653           // Go back to path of .so
2654           rp = os::Posix::realpath(dli_fname, buf, buflen);
2655           if (rp == NULL) {
2656             return;
2657           }
2658         }
2659       }
2660     }
2661   }
2662 
2663   strncpy(saved_jvm_path, buf, MAXPATHLEN);
2664   saved_jvm_path[MAXPATHLEN - 1] = '\0';
2665 }
2666 
2667 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2668   // no prefix required, not even "_"
2669 }
2670 
2671 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2672   // no suffix required
2673 }
2674 
2675 ////////////////////////////////////////////////////////////////////////////////
2676 // Virtual Memory
2677 
2678 int os::vm_page_size() {
  // The page size is established during os::init(), hence the assert.
2680   assert(os::Linux::page_size() != -1, "must call os::init");
2681   return os::Linux::page_size();
2682 }
2683 
// Linux allocates memory by pages, so the allocation granularity equals the page size.
2685 int os::vm_allocation_granularity() {
2686   assert(os::Linux::page_size() != -1, "must call os::init");
2687   return os::Linux::page_size();
2688 }
2689 
// Rationale behind this function:
//  Current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without an
//  executable mapping for the address (see lookup_dcookie() in the kernel
//  module), so we cannot get samples for JITted code. Here we create a private
//  executable mapping over the code cache, and then we can use the standard
//  (well, almost, as the mapping can change) way to provide info for the
//  reporting script by storing a timestamp and the location of the symbol.
2696 void linux_wrap_code(char* base, size_t size) {
2697   static volatile jint cnt = 0;
2698 
2699   if (!UseOprofile) {
2700     return;
2701   }
2702 
2703   char buf[PATH_MAX+1];
2704   int num = Atomic::add(&cnt, 1);
2705 
2706   snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
2707            os::get_temp_directory(), os::current_process_id(), num);
2708   unlink(buf);
2709 
2710   int fd = ::open(buf, O_CREAT | O_RDWR, S_IRWXU);
2711 
2712   if (fd != -1) {
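    // Seek to size-2 and write a single byte: this extends the file so the
    // mmap over the code cache range below has file backing (a trailing
    // partial page past EOF is permitted).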
2713     off_t rv = ::lseek(fd, size-2, SEEK_SET);
2714     if (rv != (off_t)-1) {
2715       if (::write(fd, "", 1) == 1) {
2716         mmap(base, size,
2717              PROT_READ|PROT_WRITE|PROT_EXEC,
2718              MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);
2719       }
2720     }
2721     ::close(fd);
2722     unlink(buf);
2723   }
2724 }
2725 
2726 static bool recoverable_mmap_error(int err) {
2727   // See if the error is one we can let the caller handle. This
2728   // list of errno values comes from JBS-6843484. I can't find a
2729   // Linux man page that documents this specific set of errno
2730   // values so while this list currently matches Solaris, it may
2731   // change as we gain experience with this failure mode.
2732   switch (err) {
2733   case EBADF:
2734   case EINVAL:
2735   case ENOTSUP:
2736     // let the caller deal with these errors
2737     return true;
2738 
2739   default:
2740     // Any remaining errors on this OS can cause our reserved mapping
2741     // to be lost. That can cause confusion where different data
2742     // structures think they have the same memory mapped. The worst
2743     // scenario is if both the VM and a library think they have the
2744     // same memory mapped.
2745     return false;
2746   }
2747 }
2748 
2749 static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
2750                                     int err) {
2751   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2752           ", %d) failed; error='%s' (errno=%d)", p2i(addr), size, exec,
2753           os::strerror(err), err);
2754 }
2755 
2756 static void warn_fail_commit_memory(char* addr, size_t size,
2757                                     size_t alignment_hint, bool exec,
2758                                     int err) {
2759   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2760           ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", p2i(addr), size,
2761           alignment_hint, exec, os::strerror(err), err);
2762 }
2763 
// NOTE: The Linux kernel does not really reserve the pages for us.
//       All it does is check whether there are enough free pages
//       left at the time of mmap(). This could be a potential
//       problem.
2768 int os::Linux::commit_memory_impl(char* addr, size_t size, bool exec) {
2769   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
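  // MAP_FIXED replaces the existing (reserved) mapping at [addr, addr+size)
  // in place, so a successful call commits the range with the requested
  // protection.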
2770   uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
2771                                      MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
2772   if (res != (uintptr_t) MAP_FAILED) {
2773     if (UseNUMAInterleaving) {
2774       numa_make_global(addr, size);
2775     }
2776     return 0;
2777   }
2778 
2779   int err = errno;  // save errno from mmap() call above
2780 
2781   if (!recoverable_mmap_error(err)) {
2782     warn_fail_commit_memory(addr, size, exec, err);
2783     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "committing reserved memory.");
2784   }
2785 
2786   return err;
2787 }
2788 
2789 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2790   return os::Linux::commit_memory_impl(addr, size, exec) == 0;
2791 }
2792 
2793 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2794                                   const char* mesg) {
2795   assert(mesg != NULL, "mesg must be specified");
2796   int err = os::Linux::commit_memory_impl(addr, size, exec);
2797   if (err != 0) {
2798     // the caller wants all commit errors to exit with the specified mesg:
2799     warn_fail_commit_memory(addr, size, exec, err);
2800     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
2801   }
2802 }
2803 
2804 // Define MAP_HUGETLB here so we can build HotSpot on old systems.
2805 #ifndef MAP_HUGETLB
2806   #define MAP_HUGETLB 0x40000
2807 #endif
2808 
2809 // If mmap flags are set with MAP_HUGETLB and the system supports multiple
2810 // huge page sizes, flag bits [26:31] can be used to encode the log2 of the
2811 // desired huge page size. Otherwise, the system's default huge page size will be used.
2812 // See mmap(2) man page for more info (since Linux 3.8).
2813 // https://lwn.net/Articles/533499/
2814 #ifndef MAP_HUGE_SHIFT
2815   #define MAP_HUGE_SHIFT 26
2816 #endif
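// For example, a request for 2M huge pages encodes log2(2M) == 21 into the
// mmap flags as MAP_HUGETLB | (21 << MAP_HUGE_SHIFT); see
// hugetlbfs_page_size_flag() below, which computes the shifted log2 term.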
2817 
2818 // Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
2819 #ifndef MADV_HUGEPAGE
2820   #define MADV_HUGEPAGE 14
2821 #endif
2822 
2823 int os::Linux::commit_memory_impl(char* addr, size_t size,
2824                                   size_t alignment_hint, bool exec) {
2825   int err = os::Linux::commit_memory_impl(addr, size, exec);
2826   if (err == 0) {
2827     realign_memory(addr, size, alignment_hint);
2828   }
2829   return err;
2830 }
2831 
2832 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
2833                           bool exec) {
2834   return os::Linux::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
2835 }
2836 
2837 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2838                                   size_t alignment_hint, bool exec,
2839                                   const char* mesg) {
2840   assert(mesg != NULL, "mesg must be specified");
2841   int err = os::Linux::commit_memory_impl(addr, size, alignment_hint, exec);
2842   if (err != 0) {
2843     // the caller wants all commit errors to exit with the specified mesg:
2844     warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
2845     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
2846   }
2847 }
2848 
2849 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2850   if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) {
2851     // We don't check the return value: madvise(MADV_HUGEPAGE) may not
2852     // be supported or the memory may already be backed by huge pages.
2853     ::madvise(addr, bytes, MADV_HUGEPAGE);
2854   }
2855 }
2856 
2857 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
  // This method works by doing an mmap over an existing mapping, effectively
  // discarding the existing pages. However it won't work for SHM-based large
  // pages that cannot be uncommitted at all. We don't do anything in this case
  // to avoid creating a segment with small pages on top of the SHM segment.
  // This method always works for small pages, so we allow that in any case.
2863   if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) {
2864     commit_memory(addr, bytes, alignment_hint, !ExecMem);
2865   }
2866 }
2867 
2868 void os::numa_make_global(char *addr, size_t bytes) {
2869   Linux::numa_interleave_memory(addr, bytes);
2870 }
2871 
2872 // Define for numa_set_bind_policy(int). Setting the argument to 0 will set the
2873 // bind policy to MPOL_PREFERRED for the current thread.
2874 #define USE_MPOL_PREFERRED 0
2875 
2876 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2877   // To make NUMA and large pages more robust when both enabled, we need to ease
2878   // the requirements on where the memory should be allocated. MPOL_BIND is the
2879   // default policy and it will force memory to be allocated on the specified
2880   // node. Changing this to MPOL_PREFERRED will prefer to allocate the memory on
2881   // the specified node, but will not force it. Using this policy will prevent
2882   // getting SIGBUS when trying to allocate large pages on NUMA nodes with no
2883   // free large pages.
2884   Linux::numa_set_bind_policy(USE_MPOL_PREFERRED);
2885   Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
2886 }
2887 
2888 bool os::numa_topology_changed() { return false; }
2889 
2890 size_t os::numa_get_groups_num() {
2891   // Return just the number of nodes in which it's possible to allocate memory
2892   // (in numa terminology, configured nodes).
2893   return Linux::numa_num_configured_nodes();
2894 }
2895 
2896 int os::numa_get_group_id() {
2897   int cpu_id = Linux::sched_getcpu();
2898   if (cpu_id != -1) {
2899     int lgrp_id = Linux::get_node_by_cpu(cpu_id);
2900     if (lgrp_id != -1) {
2901       return lgrp_id;
2902     }
2903   }
2904   return 0;
2905 }
2906 
2907 int os::numa_get_group_id_for_address(const void* address) {
2908   void** pages = const_cast<void**>(&address);
2909   int id = -1;
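  // Per move_pages(2): when the target nodes array is NULL, the call moves
  // nothing and instead reports, in the status array, the node each page
  // currently resides on (or a negative errno value, e.g. -EFAULT for an
  // unmapped address), which is why id < 0 is treated as failure below.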
2910 
2911   if (os::Linux::numa_move_pages(0, 1, pages, NULL, &id, 0) == -1) {
2912     return -1;
2913   }
2914   if (id < 0) {
2915     return -1;
2916   }
2917   return id;
2918 }
2919 
2920 int os::Linux::get_existing_num_nodes() {
2921   int node;
2922   int highest_node_number = Linux::numa_max_node();
2923   int num_nodes = 0;
2924 
2925   // Get the total number of nodes in the system including nodes without memory.
2926   for (node = 0; node <= highest_node_number; node++) {
2927     if (is_node_in_existing_nodes(node)) {
2928       num_nodes++;
2929     }
2930   }
2931   return num_nodes;
2932 }
2933 
2934 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2935   int highest_node_number = Linux::numa_max_node();
2936   size_t i = 0;
2937 
  // Map all node ids in which it is possible to allocate memory. Note that
  // nodes are not always consecutively available, i.e. available from 0 to
  // the highest node number. If the nodes have been bound explicitly using
  // numactl membind, then allocate memory from those nodes only.
2942   for (int node = 0; node <= highest_node_number; node++) {
2943     if (Linux::is_node_in_bound_nodes((unsigned int)node)) {
2944       ids[i++] = node;
2945     }
2946   }
2947   return i;
2948 }
2949 
2950 bool os::get_page_info(char *start, page_info* info) {
2951   return false;
2952 }
2953 
2954 char *os::scan_pages(char *start, char* end, page_info* page_expected,
2955                      page_info* page_found) {
2956   return end;
2957 }
2958 
2959 
2960 int os::Linux::sched_getcpu_syscall(void) {
2961   unsigned int cpu = 0;
2962   int retval = -1;
2963 
2964 #if defined(IA32)
2965   #ifndef SYS_getcpu
2966     #define SYS_getcpu 318
2967   #endif
2968   retval = syscall(SYS_getcpu, &cpu, NULL, NULL);
2969 #elif defined(AMD64)
// Unfortunately we have to bring all these macros here from vsyscall.h
// to be able to compile on old Linux systems.
2972   #define __NR_vgetcpu 2
2973   #define VSYSCALL_START (-10UL << 20)
2974   #define VSYSCALL_SIZE 1024
2975   #define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
2976   typedef long (*vgetcpu_t)(unsigned int *cpu, unsigned int *node, unsigned long *tcache);
2977   vgetcpu_t vgetcpu = (vgetcpu_t)VSYSCALL_ADDR(__NR_vgetcpu);
2978   retval = vgetcpu(&cpu, NULL, NULL);
2979 #endif
2980 
2981   return (retval == -1) ? retval : cpu;
2982 }
2983 
2984 void os::Linux::sched_getcpu_init() {
2985   // sched_getcpu() should be in libc.
2986   set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
2987                                   dlsym(RTLD_DEFAULT, "sched_getcpu")));
2988 
2989   // If it's not, try a direct syscall.
2990   if (sched_getcpu() == -1) {
2991     set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
2992                                     (void*)&sched_getcpu_syscall));
2993   }
2994 
2995   if (sched_getcpu() == -1) {
2996     vm_exit_during_initialization("getcpu(2) system call not supported by kernel");
2997   }
2998 }
2999 
// libnuma calls numa_warn() and numa_error() to report problems from its
// NUMA-aware allocator; providing empty overrides here keeps it silent.
3001 extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { }
3002 extern "C" JNIEXPORT void numa_error(char *where) { }
3003 
3004 // Handle request to load libnuma symbol version 1.1 (API v1). If it fails
3005 // load symbol from base version instead.
3006 void* os::Linux::libnuma_dlsym(void* handle, const char *name) {
3007   void *f = dlvsym(handle, name, "libnuma_1.1");
3008   if (f == NULL) {
3009     f = dlsym(handle, name);
3010   }
3011   return f;
3012 }
3013 
3014 // Handle request to load libnuma symbol version 1.2 (API v2) only.
3015 // Return NULL if the symbol is not defined in this particular version.
3016 void* os::Linux::libnuma_v2_dlsym(void* handle, const char* name) {
3017   return dlvsym(handle, name, "libnuma_1.2");
3018 }
3019 
3020 // Check numa dependent syscalls
3021 static bool numa_syscall_check() {
  // NUMA APIs depend on several syscalls. E.g., get_mempolicy is required for numa_get_membind and
  // numa_get_interleave_mask. But these dependent syscalls can be unsupported for various reasons.
  // In particular, in Docker containers get_mempolicy is not allowed with the default configuration.
  // So it's necessary to check whether the syscalls are available. Currently, only get_mempolicy is
  // checked, since checking others like mbind would cause unexpected side effects.
3027 #ifdef SYS_get_mempolicy
3028   int dummy = 0;
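  // This is a cheap, side-effect-free probe: flags 3 == MPOL_F_NODE|MPOL_F_ADDR
  // asks for the node backing the valid address &dummy. Under a restrictive
  // seccomp profile (e.g. Docker's default) the syscall fails outright,
  // which is exactly the condition we want to detect here.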
3029   if (syscall(SYS_get_mempolicy, &dummy, NULL, 0, (void*)&dummy, 3) == -1) {
3030     return false;
3031   }
3032 #endif
3033 
3034   return true;
3035 }
3036 
3037 bool os::Linux::libnuma_init() {
3038   // Requires sched_getcpu() and numa dependent syscalls support
3039   if ((sched_getcpu() != -1) && numa_syscall_check()) {
3040     void *handle = dlopen("libnuma.so.1", RTLD_LAZY);
3041     if (handle != NULL) {
3042       set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t,
3043                                            libnuma_dlsym(handle, "numa_node_to_cpus")));
3044       set_numa_node_to_cpus_v2(CAST_TO_FN_PTR(numa_node_to_cpus_v2_func_t,
3045                                               libnuma_v2_dlsym(handle, "numa_node_to_cpus")));
3046       set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t,
3047                                        libnuma_dlsym(handle, "numa_max_node")));
3048       set_numa_num_configured_nodes(CAST_TO_FN_PTR(numa_num_configured_nodes_func_t,
3049                                                    libnuma_dlsym(handle, "numa_num_configured_nodes")));
3050       set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,
3051                                         libnuma_dlsym(handle, "numa_available")));
3052       set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
3053                                             libnuma_dlsym(handle, "numa_tonode_memory")));
3054       set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
3055                                                 libnuma_dlsym(handle, "numa_interleave_memory")));
3056       set_numa_interleave_memory_v2(CAST_TO_FN_PTR(numa_interleave_memory_v2_func_t,
3057                                                 libnuma_v2_dlsym(handle, "numa_interleave_memory")));
3058       set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
3059                                               libnuma_dlsym(handle, "numa_set_bind_policy")));
3060       set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t,
3061                                                libnuma_dlsym(handle, "numa_bitmask_isbitset")));
3062       set_numa_distance(CAST_TO_FN_PTR(numa_distance_func_t,
3063                                        libnuma_dlsym(handle, "numa_distance")));
3064       set_numa_get_membind(CAST_TO_FN_PTR(numa_get_membind_func_t,
3065                                           libnuma_v2_dlsym(handle, "numa_get_membind")));
3066       set_numa_get_interleave_mask(CAST_TO_FN_PTR(numa_get_interleave_mask_func_t,
3067                                                   libnuma_v2_dlsym(handle, "numa_get_interleave_mask")));
3068       set_numa_move_pages(CAST_TO_FN_PTR(numa_move_pages_func_t,
3069                                          libnuma_dlsym(handle, "numa_move_pages")));
3070       set_numa_set_preferred(CAST_TO_FN_PTR(numa_set_preferred_func_t,
3071                                             libnuma_dlsym(handle, "numa_set_preferred")));
3072 
3073       if (numa_available() != -1) {
3074         set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
3075         set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr"));
3076         set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr"));
3077         set_numa_interleave_bitmask(_numa_get_interleave_mask());
3078         set_numa_membind_bitmask(_numa_get_membind());
3079         // Create an index -> node mapping, since nodes are not always consecutive
3080         _nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, mtInternal);
3081         rebuild_nindex_to_node_map();
3082         // Create a cpu -> node mapping
3083         _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, mtInternal);
3084         rebuild_cpu_to_node_map();
3085         return true;
3086       }
3087     }
3088   }
3089   return false;
3090 }
3091 
3092 size_t os::Linux::default_guard_size(os::ThreadType thr_type) {
  // Creating a guard page is very expensive. Java threads have HotSpot
  // guard pages; only enable the glibc guard page for non-Java threads.
  // (Remember: a compiler thread is a Java thread, too!)
3096   return ((thr_type == java_thread || thr_type == compiler_thread) ? 0 : page_size());
3097 }
3098 
3099 void os::Linux::rebuild_nindex_to_node_map() {
3100   int highest_node_number = Linux::numa_max_node();
3101 
3102   nindex_to_node()->clear();
3103   for (int node = 0; node <= highest_node_number; node++) {
3104     if (Linux::is_node_in_existing_nodes(node)) {
3105       nindex_to_node()->append(node);
3106     }
3107   }
3108 }
3109 
// rebuild_cpu_to_node_map() constructs a table mapping cpu id to node id.
// The table is later used in get_node_by_cpu().
void os::Linux::rebuild_cpu_to_node_map() {
  const size_t NCPUS = 32768; // The buffer size computation in libnuma is very
                              // obscure (possible values start at 16 and continue
                              // up through every other power of 2, but stay below
                              // the maximum number of CPUs supported by the
                              // kernel) and is subject to change (in libnuma
                              // version 2 the requirements are more reasonable),
                              // so we just hardcode the number the library uses.
3120   const size_t BitsPerCLong = sizeof(long) * CHAR_BIT;
3121 
3122   size_t cpu_num = processor_count();
3123   size_t cpu_map_size = NCPUS / BitsPerCLong;
3124   size_t cpu_map_valid_size =
3125     MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size);
3126 
3127   cpu_to_node()->clear();
3128   cpu_to_node()->at_grow(cpu_num - 1);
3129 
3130   size_t node_num = get_existing_num_nodes();
3131 
3132   int distance = 0;
3133   int closest_distance = INT_MAX;
3134   int closest_node = 0;
3135   unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size, mtInternal);
3136   for (size_t i = 0; i < node_num; i++) {
3137     // Check if node is configured (not a memory-less node). If it is not, find
3138     // the closest configured node. Check also if node is bound, i.e. it's allowed
3139     // to allocate memory from the node. If it's not allowed, map cpus in that node
3140     // to the closest node from which memory allocation is allowed.
3141     if (!is_node_in_configured_nodes(nindex_to_node()->at(i)) ||
3142         !is_node_in_bound_nodes(nindex_to_node()->at(i))) {
3143       closest_distance = INT_MAX;
3144       // Check distance from all remaining nodes in the system. Ignore distance
3145       // from itself, from another non-configured node, and from another non-bound
3146       // node.
3147       for (size_t m = 0; m < node_num; m++) {
3148         if (m != i &&
3149             is_node_in_configured_nodes(nindex_to_node()->at(m)) &&
3150             is_node_in_bound_nodes(nindex_to_node()->at(m))) {
3151           distance = numa_distance(nindex_to_node()->at(i), nindex_to_node()->at(m));
3152           // If a closest node is found, update. There is always at least one
3153           // configured and bound node in the system so there is always at least
3154           // one node close.
3155           if (distance != 0 && distance < closest_distance) {
3156             closest_distance = distance;
3157             closest_node = nindex_to_node()->at(m);
3158           }
3159         }
3160       }
    } else {
      // Current node is already a configured node.
      closest_node = nindex_to_node()->at(i);
    }
3165 
3166     // Get cpus from the original node and map them to the closest node. If node
3167     // is a configured node (not a memory-less node), then original node and
3168     // closest node are the same.
3169     if (numa_node_to_cpus(nindex_to_node()->at(i), cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) {
3170       for (size_t j = 0; j < cpu_map_valid_size; j++) {
3171         if (cpu_map[j] != 0) {
3172           for (size_t k = 0; k < BitsPerCLong; k++) {
3173             if (cpu_map[j] & (1UL << k)) {
3174               int cpu_index = j * BitsPerCLong + k;
3175 
3176 #ifndef PRODUCT
3177               if (UseDebuggerErgo1 && cpu_index >= (int)cpu_num) {
3178                 // Some debuggers limit the processor count without
3179                 // intercepting the NUMA APIs. Just fake the values.
3180                 cpu_index = 0;
3181               }
3182 #endif
3183 
3184               cpu_to_node()->at_put(cpu_index, closest_node);
3185             }
3186           }
3187         }
3188       }
3189     }
3190   }
3191   FREE_C_HEAP_ARRAY(unsigned long, cpu_map);
3192 }
3193 
3194 int os::Linux::numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
3195   // use the latest version of numa_node_to_cpus if available
3196   if (_numa_node_to_cpus_v2 != NULL) {
3197 
3198     // libnuma bitmask struct
3199     struct bitmask {
3200       unsigned long size; /* number of bits in the map */
3201       unsigned long *maskp;
3202     };
3203 
3204     struct bitmask mask;
3205     mask.maskp = (unsigned long *)buffer;
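    // struct bitmask counts its size in bits, while bufferlen is in bytes;
    // hence the multiplication by 8 (CHAR_BIT).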
3206     mask.size = bufferlen * 8;
3207     return _numa_node_to_cpus_v2(node, &mask);
3208   } else if (_numa_node_to_cpus != NULL) {
3209     return _numa_node_to_cpus(node, buffer, bufferlen);
3210   }
3211   return -1;
3212 }
3213 
3214 int os::Linux::get_node_by_cpu(int cpu_id) {
3215   if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) {
3216     return cpu_to_node()->at(cpu_id);
3217   }
3218   return -1;
3219 }
3220 
3221 GrowableArray<int>* os::Linux::_cpu_to_node;
3222 GrowableArray<int>* os::Linux::_nindex_to_node;
3223 os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu;
3224 os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus;
3225 os::Linux::numa_node_to_cpus_v2_func_t os::Linux::_numa_node_to_cpus_v2;
3226 os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
3227 os::Linux::numa_num_configured_nodes_func_t os::Linux::_numa_num_configured_nodes;
3228 os::Linux::numa_available_func_t os::Linux::_numa_available;
3229 os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
3230 os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
3231 os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2;
3232 os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
3233 os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
3234 os::Linux::numa_distance_func_t os::Linux::_numa_distance;
3235 os::Linux::numa_get_membind_func_t os::Linux::_numa_get_membind;
3236 os::Linux::numa_get_interleave_mask_func_t os::Linux::_numa_get_interleave_mask;
3237 os::Linux::numa_move_pages_func_t os::Linux::_numa_move_pages;
3238 os::Linux::numa_set_preferred_func_t os::Linux::_numa_set_preferred;
3239 os::Linux::NumaAllocationPolicy os::Linux::_current_numa_policy;
3240 unsigned long* os::Linux::_numa_all_nodes;
3241 struct bitmask* os::Linux::_numa_all_nodes_ptr;
3242 struct bitmask* os::Linux::_numa_nodes_ptr;
3243 struct bitmask* os::Linux::_numa_interleave_bitmask;
3244 struct bitmask* os::Linux::_numa_membind_bitmask;
3245 
3246 bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
3247   uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
3248                                      MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
3249   return res  != (uintptr_t) MAP_FAILED;
3250 }
3251 
static address get_stack_committed_bottom(address bottom, size_t size) {
3253   address nbot = bottom;
3254   address ntop = bottom + size;
3255 
3256   size_t page_sz = os::vm_page_size();
3257   unsigned pages = size / page_sz;
3258 
3259   unsigned char vec[1];
3260   unsigned imin = 1, imax = pages + 1, imid;
3261   int mincore_return_value = 0;
3262 
3263   assert(imin <= imax, "Unexpected page size");
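  // Binary search, counting pages down from ntop: if the probe page imid
  // pages below ntop is mapped, the commit boundary lies further down and
  // imin moves up; if it is unmapped, the boundary lies above it and imax
  // moves down. The loop converges on the lowest committed page.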
3264 
3265   while (imin < imax) {
3266     imid = (imax + imin) / 2;
3267     nbot = ntop - (imid * page_sz);
3268 
    // Use a trick with mincore to check whether the page is mapped or not.
    // mincore sets vec to 1 if the page resides in memory and to 0 if it
    // is swapped out, but if the page we are asking for is unmapped,
    // it fails with -1 and errno == ENOMEM.
3273     mincore_return_value = mincore(nbot, page_sz, vec);
3274 
3275     if (mincore_return_value == -1) {
      // Page is not mapped; go up
      // to find the first mapped page.
3278       if (errno != EAGAIN) {
3279         assert(errno == ENOMEM, "Unexpected mincore errno");
3280         imax = imid;
3281       }
3282     } else {
      // Page is mapped; go down
      // to find the first unmapped page.
3285       imin = imid + 1;
3286     }
3287   }
3288 
3289   nbot = nbot + page_sz;
3290 
3291   // Adjust stack bottom one page up if last checked page is not mapped
3292   if (mincore_return_value == -1) {
3293     nbot = nbot + page_sz;
3294   }
3295 
3296   return nbot;
3297 }
3298 
3299 bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
3300   int mincore_return_value;
3301   const size_t stripe = 1024;  // query this many pages each time
3302   unsigned char vec[stripe + 1];
3303   // set a guard
3304   vec[stripe] = 'X';
3305 
3306   const size_t page_sz = os::vm_page_size();
3307   size_t pages = size / page_sz;
3308 
3309   assert(is_aligned(start, page_sz), "Start address must be page aligned");
3310   assert(is_aligned(size, page_sz), "Size must be page aligned");
3311 
3312   committed_start = NULL;
3313 
3314   int loops = (pages + stripe - 1) / stripe;
3315   int committed_pages = 0;
3316   address loop_base = start;
3317   bool found_range = false;
3318 
3319   for (int index = 0; index < loops && !found_range; index ++) {
3320     assert(pages > 0, "Nothing to do");
3321     int pages_to_query = (pages >= stripe) ? stripe : pages;
3322     pages -= pages_to_query;
3323 
3324     // Get stable read
3325     while ((mincore_return_value = mincore(loop_base, pages_to_query * page_sz, vec)) == -1 && errno == EAGAIN);
3326 
    // During shutdown, some memory goes away without properly notifying NMT,
    // e.g. ConcurrentGCThread/WatcherThread can exit without deleting the thread object.
    // Bail out and return as not committed for now.
3330     if (mincore_return_value == -1 && errno == ENOMEM) {
3331       return false;
3332     }
3333 
3334     assert(vec[stripe] == 'X', "overflow guard");
3335     assert(mincore_return_value == 0, "Range must be valid");
3336     // Process this stripe
3337     for (int vecIdx = 0; vecIdx < pages_to_query; vecIdx ++) {
3338       if ((vec[vecIdx] & 0x01) == 0) { // not committed
3339         // End of current contiguous region
3340         if (committed_start != NULL) {
3341           found_range = true;
3342           break;
3343         }
3344       } else { // committed
3345         // Start of region
3346         if (committed_start == NULL) {
3347           committed_start = loop_base + page_sz * vecIdx;
3348         }
3349         committed_pages ++;
3350       }
3351     }
3352 
3353     loop_base += pages_to_query * page_sz;
3354   }
3355 
3356   if (committed_start != NULL) {
3357     assert(committed_pages > 0, "Must have committed region");
3358     assert(committed_pages <= int(size / page_sz), "Can not commit more than it has");
3359     assert(committed_start >= start && committed_start < start + size, "Out of range");
3360     committed_size = page_sz * committed_pages;
3361     return true;
3362   } else {
3363     assert(committed_pages == 0, "Should not have committed region");
3364     return false;
3365   }
3366 }
3367 
3368 
3369 // Linux uses a growable mapping for the stack, and if the mapping for
3370 // the stack guard pages is not removed when we detach a thread the
3371 // stack cannot grow beyond the pages where the stack guard was
3372 // mapped.  If at some point later in the process the stack expands to
3373 // that point, the Linux kernel cannot expand the stack any further
3374 // because the guard pages are in the way, and a segfault occurs.
3375 //
3376 // However, it's essential not to split the stack region by unmapping
3377 // a region (leaving a hole) that's already part of the stack mapping,
3378 // so if the stack mapping has already grown beyond the guard pages at
3379 // the time we create them, we have to truncate the stack mapping.
3380 // So, we need to know the extent of the stack mapping when
3381 // create_stack_guard_pages() is called.
3382 
// We only need this for stacks that are growable: at the time of
// writing thread stacks don't use growable mappings (i.e. those
// created with MAP_GROWSDOWN), and aren't marked "[stack]", so this
// only applies to the main thread.
3387 
3388 // If the (growable) stack mapping already extends beyond the point
3389 // where we're going to put our guard pages, truncate the mapping at
3390 // that point by munmap()ping it.  This ensures that when we later
3391 // munmap() the guard pages we don't leave a hole in the stack
// mapping. This only affects the main/primordial thread.
3393 
3394 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3395   if (os::is_primordial_thread()) {
3396     // As we manually grow stack up to bottom inside create_attached_thread(),
3397     // it's likely that os::Linux::initial_thread_stack_bottom is mapped and
3398     // we don't need to do anything special.
3399     // Check it first, before calling heavy function.
3400     uintptr_t stack_extent = (uintptr_t) os::Linux::initial_thread_stack_bottom();
3401     unsigned char vec[1];
3402 
3403     if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) {
3404       // Fallback to slow path on all errors, including EAGAIN
3405       assert((uintptr_t)addr >= stack_extent,
3406              "Sanity: addr should be larger than extent, " PTR_FORMAT " >= " PTR_FORMAT,
3407              p2i(addr), stack_extent);
      stack_extent = (uintptr_t) get_stack_committed_bottom(
                                   os::Linux::initial_thread_stack_bottom(),
                                   (size_t)addr - stack_extent);
3411     }
3412 
3413     if (stack_extent < (uintptr_t)addr) {
3414       ::munmap((void*)stack_extent, (uintptr_t)(addr - stack_extent));
3415     }
3416   }
3417 
3418   return os::commit_memory(addr, size, !ExecMem);
3419 }
3420 
3421 // If this is a growable mapping, remove the guard pages entirely by
3422 // munmap()ping them.  If not, just call uncommit_memory(). This only
3423 // affects the main/primordial thread, but guard against future OS changes.
3424 // It's safe to always unmap guard pages for primordial thread because we
3425 // always place it right after end of the mapped region.
3426 
3427 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3430   if (os::is_primordial_thread()) {
3431     return ::munmap(addr, size) == 0;
3432   }
3433 
3434   return os::uncommit_memory(addr, size);
3435 }
3436 
3437 // 'requested_addr' is only treated as a hint, the return value may or
3438 // may not start from the requested address. Unlike Linux mmap(), this
3439 // function returns NULL to indicate failure.
3440 static char* anon_mmap(char* requested_addr, size_t bytes) {
3441   // MAP_FIXED is intentionally left out, to leave existing mappings intact.
3442   const int flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
3443 
3444   // Map reserved/uncommitted pages PROT_NONE so we fail early if we
3445   // touch an uncommitted page. Otherwise, the read/write might
3446   // succeed if we have enough swap space to back the physical page.
3447   char* addr = (char*)::mmap(requested_addr, bytes, PROT_NONE, flags, -1, 0);
3448 
3449   return addr == MAP_FAILED ? NULL : addr;
3450 }
3451 
3452 // Allocate (using mmap, NO_RESERVE, with small pages) at either a given request address
3453 //   (req_addr != NULL) or with a given alignment.
3454 //  - bytes shall be a multiple of alignment.
3455 //  - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
3456 //  - alignment sets the alignment at which memory shall be allocated.
3457 //     It must be a multiple of allocation granularity.
3458 // Returns address of memory or NULL. If req_addr was not NULL, will only return
3459 //  req_addr or NULL.
3460 static char* anon_mmap_aligned(char* req_addr, size_t bytes, size_t alignment) {
3461   size_t extra_size = bytes;
3462   if (req_addr == NULL && alignment > 0) {
3463     extra_size += alignment;
3464   }
3465 
3466   char* start = anon_mmap(req_addr, extra_size);
3467   if (start != NULL) {
3468     if (req_addr != NULL) {
3469       if (start != req_addr) {
3470         ::munmap(start, extra_size);
3471         start = NULL;
3472       }
3473     } else {
3474       char* const start_aligned = align_up(start, alignment);
3475       char* const end_aligned = start_aligned + bytes;
3476       char* const end = start + extra_size;
3477       if (start_aligned > start) {
3478         ::munmap(start, start_aligned - start);
3479       }
3480       if (end_aligned < end) {
3481         ::munmap(end_aligned, end - end_aligned);
3482       }
3483       start = start_aligned;
3484     }
3485   }
3486   return start;
3487 }
3488 
3489 static int anon_munmap(char * addr, size_t size) {
3490   return ::munmap(addr, size) == 0;
3491 }
3492 
3493 char* os::pd_reserve_memory(size_t bytes, bool exec) {
3494   return anon_mmap(NULL, bytes);
3495 }
3496 
3497 bool os::pd_release_memory(char* addr, size_t size) {
3498   return anon_munmap(addr, size);
3499 }
3500 
3501 #ifdef CAN_SHOW_REGISTERS_ON_ASSERT
3502 extern char* g_assert_poison; // assertion poison page address
3503 #endif
3504 
3505 static bool linux_mprotect(char* addr, size_t size, int prot) {
3506   // Linux wants the mprotect address argument to be page aligned.
3507   char* bottom = (char*)align_down((intptr_t)addr, os::Linux::page_size());
3508 
3509   // According to SUSv3, mprotect() should only be used with mappings
3510   // established by mmap(), and mmap() always maps whole pages. Unaligned
3511   // 'addr' likely indicates problem in the VM (e.g. trying to change
3512   // protection of malloc'ed or statically allocated memory). Check the
3513   // caller if you hit this assert.
3514   assert(addr == bottom, "sanity check");
3515 
3516   size = align_up(pointer_delta(addr, bottom, 1) + size, os::Linux::page_size());
3517   // Don't log anything if we're executing in the poison page signal handling
3518   // context. It can lead to reentrant use of other parts of the VM code.
3519 #ifdef CAN_SHOW_REGISTERS_ON_ASSERT
3520   if (addr != g_assert_poison)
3521 #endif
3522   Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(bottom), p2i(bottom+size), prot);
3523   return ::mprotect(bottom, size, prot) == 0;
3524 }
3525 
3526 // Set protections specified
3527 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3528                         bool is_committed) {
3529   unsigned int p = 0;
3530   switch (prot) {
3531   case MEM_PROT_NONE: p = PROT_NONE; break;
3532   case MEM_PROT_READ: p = PROT_READ; break;
3533   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
3534   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
3535   default:
3536     ShouldNotReachHere();
3537   }
3538   // is_committed is unused.
3539   return linux_mprotect(addr, bytes, p);
3540 }
3541 
3542 bool os::guard_memory(char* addr, size_t size) {
3543   return linux_mprotect(addr, size, PROT_NONE);
3544 }
3545 
3546 bool os::unguard_memory(char* addr, size_t size) {
3547   return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
3548 }
3549 
3550 bool os::Linux::transparent_huge_pages_sanity_check(bool warn,
3551                                                     size_t page_size) {
3552   bool result = false;
3553   void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
3554                  MAP_ANONYMOUS|MAP_PRIVATE,
3555                  -1, 0);
3556   if (p != MAP_FAILED) {
3557     void *aligned_p = align_up(p, page_size);
3558 
3559     result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0;
3560 
3561     munmap(p, page_size * 2);
3562   }
3563 
3564   if (warn && !result) {
3565     warning("TransparentHugePages is not supported by the operating system.");
3566   }
3567 
3568   return result;
3569 }
3570 
3571 int os::Linux::hugetlbfs_page_size_flag(size_t page_size) {
3572   if (page_size != default_large_page_size()) {
3573     return (exact_log2(page_size) << MAP_HUGE_SHIFT);
3574   }
3575   return 0;
3576 }
3577 
3578 bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
3579   // Include the page size flag to ensure we sanity check the correct page size.
3580   int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size);
3581   void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE, flags, -1, 0);
3582 
3583   if (p != MAP_FAILED) {
3584     // Mapping succeeded, sanity check passed.
3585     munmap(p, page_size);
3586     return true;
  } else {
    log_info(pagesize)("Large page size (" SIZE_FORMAT "%s) failed sanity check, "
                       "checking if smaller large page sizes are usable",
                       byte_size_in_exact_unit(page_size),
                       exact_unit_for_byte_size(page_size));
    for (size_t page_size_ = _page_sizes.next_smaller(page_size);
         page_size_ != (size_t)os::vm_page_size();
         page_size_ = _page_sizes.next_smaller(page_size_)) {
      flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size_);
      p = mmap(NULL, page_size_, PROT_READ|PROT_WRITE, flags, -1, 0);
      if (p != MAP_FAILED) {
        // Mapping succeeded, sanity check passed.
        munmap(p, page_size_);
        log_info(pagesize)("Large page size (" SIZE_FORMAT "%s) passed sanity check",
                           byte_size_in_exact_unit(page_size_),
                           exact_unit_for_byte_size(page_size_));
        return true;
      }
    }
  }
3607 
3608   if (warn) {
3609     warning("HugeTLBFS is not configured or not supported by the operating system.");
3610   }
3611 
3612   return false;
3613 }
3614 
3615 bool os::Linux::shm_hugetlbfs_sanity_check(bool warn, size_t page_size) {
3616   // Try to create a large shared memory segment.
3617   int shmid = shmget(IPC_PRIVATE, page_size, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
3618   if (shmid == -1) {
3619     // Possible reasons for shmget failure:
3620     // 1. shmmax is too small for the request.
3621     //    > check shmmax value: cat /proc/sys/kernel/shmmax
3622     //    > increase shmmax value: echo "new_value" > /proc/sys/kernel/shmmax
3623     // 2. not enough large page memory.
3624     //    > check available large pages: cat /proc/meminfo
3625     //    > increase amount of large pages:
3626     //          sysctl -w vm.nr_hugepages=new_value
3627     //    > For more information regarding large pages please refer to:
3628     //      https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
3629     if (warn) {
3630       warning("Large pages using UseSHM are not configured on this system.");
3631     }
3632     return false;
3633   }
3634   // Managed to create a segment, now delete it.
3635   shmctl(shmid, IPC_RMID, NULL);
3636   return true;
3637 }
3638 
3639 // From the coredump_filter documentation:
3640 //
3641 // - (bit 0) anonymous private memory
3642 // - (bit 1) anonymous shared memory
3643 // - (bit 2) file-backed private memory
3644 // - (bit 3) file-backed shared memory
3645 // - (bit 4) ELF header pages in file-backed private memory areas (it is
3646 //           effective only if the bit 2 is cleared)
3647 // - (bit 5) hugetlb private memory
3648 // - (bit 6) hugetlb shared memory
3649 // - (bit 7) dax private memory
3650 // - (bit 8) dax shared memory
3651 //
3652 static void set_coredump_filter(CoredumpFilterBit bit) {
3653   FILE *f;
3654   long cdm;
3655 
3656   if ((f = fopen("/proc/self/coredump_filter", "r+")) == NULL) {
3657     return;
3658   }
3659 
3660   if (fscanf(f, "%lx", &cdm) != 1) {
3661     fclose(f);
3662     return;
3663   }
3664 
3665   long saved_cdm = cdm;
3666   rewind(f);
3667   cdm |= bit;
3668 
3669   if (cdm != saved_cdm) {
3670     fprintf(f, "%#lx", cdm);
3671   }
3672 
3673   fclose(f);
3674 }
3675 
3676 // Large page support
3677 
3678 static size_t _large_page_size = 0;
3679 
3680 static size_t scan_default_large_page_size() {
3681   size_t default_large_page_size = 0;
3682 
  // large_page_size on Linux is used to round up heap size. x86 uses either
  // 2M or 4M pages, depending on whether PAE (Physical Address Extensions)
  // mode is enabled. AMD64/EM64T uses 2M pages in 64-bit mode. IA64 can use
  // pages as large as 1G.
  //
  // Here we try to figure out the page size by parsing /proc/meminfo and looking
  // for a line with the following format:
  //    Hugepagesize:     2048 kB
  //
  // If we can't determine the value (e.g. /proc is not mounted, or the text
  // format has been changed), we'll set the default large page size to 0.
3694 
3695   FILE *fp = fopen("/proc/meminfo", "r");
3696   if (fp) {
3697     while (!feof(fp)) {
3698       int x = 0;
3699       char buf[16];
3700       if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
3701         if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
3702           default_large_page_size = x * K;
3703           break;
3704         }
3705       } else {
3706         // skip to next line
3707         for (;;) {
3708           int ch = fgetc(fp);
3709           if (ch == EOF || ch == (int)'\n') break;
3710         }
3711       }
3712     }
3713     fclose(fp);
3714   }
3715 
3716   return default_large_page_size;
3717 }
3718 
3719 static os::PageSizes scan_multiple_page_support() {
3720   // Scan /sys/kernel/mm/hugepages
3721   // to discover the available page sizes
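  // Each supported size appears there as a directory named hugepages-<size>kB,
  // e.g. hugepages-2048kB and hugepages-1048576kB on an x86_64 system with
  // 2M and 1G pages configured.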
3722   const char* sys_hugepages = "/sys/kernel/mm/hugepages";
3723   os::PageSizes page_sizes;
3724 
  DIR *dir = opendir(sys_hugepages);
  if (dir == NULL) {
    // No hugepage support at all; return the empty set.
    return page_sizes;
  }

  struct dirent *entry;
  size_t page_size;
  while ((entry = readdir(dir)) != NULL) {
    if (entry->d_type == DT_DIR &&
        sscanf(entry->d_name, "hugepages-%zukB", &page_size) == 1) {
      // The kernel reports sizes in kB; HotSpot uses bytes.
      // Add each large page size found to page_sizes.
      page_sizes.add(page_size * K);
3735     }
3736   }
3737   closedir(dir);
3738 
3739   LogTarget(Debug, pagesize) lt;
3740   if (lt.is_enabled()) {
3741     LogStream ls(lt);
3742     ls.print("Large Page sizes: ");
3743     page_sizes.print_on(&ls);
3744   }
3745 
3746   return page_sizes;
3747 }
3748 
3749 size_t os::Linux::default_large_page_size() {
3750   return _default_large_page_size;
3751 }
3752 
3753 void warn_no_large_pages_configured() {
3754   if (!FLAG_IS_DEFAULT(UseLargePages)) {
3755     log_warning(pagesize)("UseLargePages disabled, no large pages configured and available on the system.");
3756   }
3757 }
3758 
3759 bool os::Linux::setup_large_page_type(size_t page_size) {
3760   if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
3761       FLAG_IS_DEFAULT(UseSHM) &&
3762       FLAG_IS_DEFAULT(UseTransparentHugePages)) {
3763 
3764     // The type of large pages has not been specified by the user.
3765 
3766     // Try UseHugeTLBFS and then UseSHM.
3767     UseHugeTLBFS = UseSHM = true;
3768 
3769     // Don't try UseTransparentHugePages since there are known
3770     // performance issues with it turned on. This might change in the future.
3771     UseTransparentHugePages = false;
3772   }
3773 
3774   if (UseTransparentHugePages) {
3775     bool warn_on_failure = !FLAG_IS_DEFAULT(UseTransparentHugePages);
3776     if (transparent_huge_pages_sanity_check(warn_on_failure, page_size)) {
3777       UseHugeTLBFS = false;
3778       UseSHM = false;
3779       return true;
3780     }
3781     UseTransparentHugePages = false;
3782   }
3783 
3784   if (UseHugeTLBFS) {
3785     bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
3786     if (hugetlbfs_sanity_check(warn_on_failure, page_size)) {
3787       UseSHM = false;
3788       return true;
3789     }
3790     UseHugeTLBFS = false;
3791   }
3792 
3793   if (UseSHM) {
3794     bool warn_on_failure = !FLAG_IS_DEFAULT(UseSHM);
3795     if (shm_hugetlbfs_sanity_check(warn_on_failure, page_size)) {
3796       return true;
3797     }
3798     UseSHM = false;
3799   }
3800 
3801   warn_no_large_pages_configured();
3802   return false;
3803 }
3804 
3805 void os::large_page_init() {
3806   // 1) Handle the case where we do not want to use huge pages and hence
3807   //    there is no need to scan the OS for related info
3808   if (!UseLargePages &&
3809       !UseTransparentHugePages &&
3810       !UseHugeTLBFS &&
3811       !UseSHM) {
3812     // Not using large pages.
3813     return;
3814   }
3815 
3816   if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) {
3817     // The user explicitly turned off large pages.
3818     // Ignore the rest of the large pages flags.
3819     UseTransparentHugePages = false;
3820     UseHugeTLBFS = false;
3821     UseSHM = false;
3822     return;
3823   }
3824 
3825   // 2) Scan OS info
3826   size_t default_large_page_size = scan_default_large_page_size();
3827   os::Linux::_default_large_page_size = default_large_page_size;
3828   if (default_large_page_size == 0) {
3829     // No large pages configured, return.
3830     warn_no_large_pages_configured();
3831     UseLargePages = false;
3832     UseTransparentHugePages = false;
3833     UseHugeTLBFS = false;
3834     UseSHM = false;
3835     return;
3836   }
3837   os::PageSizes all_large_pages = scan_multiple_page_support();
3838 
3839   // 3) Consistency check and post-processing
3840 
3841   // It is unclear if /sys/kernel/mm/hugepages/ and /proc/meminfo could disagree. Manually
3842   // re-add the default page size to the list of page sizes to be sure.
3843   all_large_pages.add(default_large_page_size);
3844 
3845   // Check LargePageSizeInBytes matches an available page size and if so set _large_page_size
3846   // using LargePageSizeInBytes as the maximum allowed large page size. If LargePageSizeInBytes
3847   // doesn't match an available page size set _large_page_size to default_large_page_size
3848   // and use it as the maximum.
  if (FLAG_IS_DEFAULT(LargePageSizeInBytes) ||
3850       LargePageSizeInBytes == 0 ||
3851       LargePageSizeInBytes == default_large_page_size) {
3852     _large_page_size = default_large_page_size;
3853     log_info(pagesize)("Using the default large page size: " SIZE_FORMAT "%s",
3854                        byte_size_in_exact_unit(_large_page_size),
3855                        exact_unit_for_byte_size(_large_page_size));
3856   } else {
3857     if (all_large_pages.contains(LargePageSizeInBytes)) {
3858       _large_page_size = LargePageSizeInBytes;
3859       log_info(pagesize)("Overriding default large page size (" SIZE_FORMAT "%s) "
3860                          "using LargePageSizeInBytes: " SIZE_FORMAT "%s",
3861                          byte_size_in_exact_unit(default_large_page_size),
3862                          exact_unit_for_byte_size(default_large_page_size),
3863                          byte_size_in_exact_unit(_large_page_size),
3864                          exact_unit_for_byte_size(_large_page_size));
3865     } else {
3866       _large_page_size = default_large_page_size;
3867       log_info(pagesize)("LargePageSizeInBytes is not a valid large page size (" SIZE_FORMAT "%s) "
3868                          "using the default large page size: " SIZE_FORMAT "%s",
3869                          byte_size_in_exact_unit(LargePageSizeInBytes),
3870                          exact_unit_for_byte_size(LargePageSizeInBytes),
3871                          byte_size_in_exact_unit(_large_page_size),
3872                          exact_unit_for_byte_size(_large_page_size));
3873     }
3874   }
3875 
3876   // Populate _page_sizes with large page sizes less than or equal to
3877   // _large_page_size.
3878   for (size_t page_size = _large_page_size; page_size != 0;
3879          page_size = all_large_pages.next_smaller(page_size)) {
3880     _page_sizes.add(page_size);
3881   }
3882 
3883   LogTarget(Info, pagesize) lt;
3884   if (lt.is_enabled()) {
3885     LogStream ls(lt);
3886     ls.print("Usable page sizes: ");
3887     _page_sizes.print_on(&ls);
3888   }
3889 
3890   // Now determine the type of large pages to use:
3891   UseLargePages = os::Linux::setup_large_page_type(_large_page_size);
3892 
3893   set_coredump_filter(LARGEPAGES_BIT);
3894 }
3895 
3896 #ifndef SHM_HUGETLB
3897   #define SHM_HUGETLB 04000
3898 #endif
3899 
3900 #define shm_warning_format(format, ...)              \
3901   do {                                               \
3902     if (UseLargePages &&                             \
3903         (!FLAG_IS_DEFAULT(UseLargePages) ||          \
3904          !FLAG_IS_DEFAULT(UseSHM) ||                 \
3905          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {  \
3906       warning(format, __VA_ARGS__);                  \
3907     }                                                \
3908   } while (0)
3909 
3910 #define shm_warning(str) shm_warning_format("%s", str)
3911 
3912 #define shm_warning_with_errno(str)                \
3913   do {                                             \
3914     int err = errno;                               \
3915     shm_warning_format(str " (error = %d)", err);  \
3916   } while (0)
3917 
3918 static char* shmat_with_alignment(int shmid, size_t bytes, size_t alignment) {
3919   assert(is_aligned(bytes, alignment), "Must be divisible by the alignment");
3920 
3921   if (!is_aligned(alignment, SHMLBA)) {
3922     assert(false, "Code below assumes that alignment is at least SHMLBA aligned");
3923     return NULL;
3924   }
3925 
3926   // To ensure that we get 'alignment' aligned memory from shmat,
3927   // we pre-reserve aligned virtual memory and then attach to that.
3928 
3929   char* pre_reserved_addr = anon_mmap_aligned(NULL /* req_addr */, bytes, alignment);
3930   if (pre_reserved_addr == NULL) {
3931     // Couldn't pre-reserve aligned memory.
3932     shm_warning("Failed to pre-reserve aligned memory for shmat.");
3933     return NULL;
3934   }
3935 
3936   // SHM_REMAP is needed to allow shmat to map over an existing mapping.
3937   char* addr = (char*)shmat(shmid, pre_reserved_addr, SHM_REMAP);
3938 
3939   if ((intptr_t)addr == -1) {
3940     int err = errno;
3941     shm_warning_with_errno("Failed to attach shared memory.");
3942 
3943     assert(err != EACCES, "Unexpected error");
3944     assert(err != EIDRM,  "Unexpected error");
3945     assert(err != EINVAL, "Unexpected error");
3946 
3947     // Since we don't know if the kernel unmapped the pre-reserved memory area
3948     // we can't unmap it, since that would potentially unmap memory that was
3949     // mapped from other threads.
3950     return NULL;
3951   }
3952 
3953   return addr;
3954 }
3955 
3956 static char* shmat_at_address(int shmid, char* req_addr) {
3957   if (!is_aligned(req_addr, SHMLBA)) {
3958     assert(false, "Requested address needs to be SHMLBA aligned");
3959     return NULL;
3960   }
3961 
3962   char* addr = (char*)shmat(shmid, req_addr, 0);
3963 
3964   if ((intptr_t)addr == -1) {
3965     shm_warning_with_errno("Failed to attach shared memory.");
3966     return NULL;
3967   }
3968 
3969   return addr;
3970 }
3971 
3972 static char* shmat_large_pages(int shmid, size_t bytes, size_t alignment, char* req_addr) {
3973   // If a req_addr has been provided, we assume that the caller has already aligned the address.
3974   if (req_addr != NULL) {
3975     assert(is_aligned(req_addr, os::large_page_size()), "Must be divisible by the large page size");
3976     assert(is_aligned(req_addr, alignment), "Must be divisible by given alignment");
3977     return shmat_at_address(shmid, req_addr);
3978   }
3979 
  // Since shmid has been set up with SHM_HUGETLB, shmat will automatically
3981   // return large page size aligned memory addresses when req_addr == NULL.
3982   // However, if the alignment is larger than the large page size, we have
3983   // to manually ensure that the memory returned is 'alignment' aligned.
3984   if (alignment > os::large_page_size()) {
3985     assert(is_aligned(alignment, os::large_page_size()), "Must be divisible by the large page size");
3986     return shmat_with_alignment(shmid, bytes, alignment);
3987   } else {
3988     return shmat_at_address(shmid, NULL);
3989   }
3990 }
3991 
3992 char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment,
3993                                             char* req_addr, bool exec) {
3994   // "exec" is passed in but not used.  Creating the shared image for
3995   // the code cache doesn't have an SHM_X executable permission to check.
3996   assert(UseLargePages && UseSHM, "only for SHM large pages");
3997   assert(is_aligned(req_addr, os::large_page_size()), "Unaligned address");
3998   assert(is_aligned(req_addr, alignment), "Unaligned address");
3999 
4000   if (!is_aligned(bytes, os::large_page_size())) {
4001     return NULL; // Fallback to small pages.
4002   }
4003 
4004   // Create a large shared memory region to attach to based on size.
4005   // Currently, size is the total size of the heap.
4006   int shmid = shmget(IPC_PRIVATE, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
4007   if (shmid == -1) {
4008     // Possible reasons for shmget failure:
4009     // 1. shmmax is too small for the request.
4010     //    > check shmmax value: cat /proc/sys/kernel/shmmax
4011     //    > increase shmmax value: echo "new_value" > /proc/sys/kernel/shmmax
4012     // 2. not enough large page memory.
4013     //    > check available large pages: cat /proc/meminfo
4014     //    > increase amount of large pages:
4015     //          sysctl -w vm.nr_hugepages=new_value
4016     //    > For more information regarding large pages please refer to:
4017     //      https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
    //      Note 1: different Linux distributions may use different names for this
    //            property, e.g. on Red Hat AS-3 it is "hugetlb_pool".
    //      Note 2: it's possible there's enough physical memory available but
    //            it is so fragmented after a long run that it can't be
    //            coalesced into large pages. Try to reserve large pages when
    //            the system is still "fresh".
4024     shm_warning_with_errno("Failed to reserve shared memory.");
4025     return NULL;
4026   }
4027 
4028   // Attach to the region.
4029   char* addr = shmat_large_pages(shmid, bytes, alignment, req_addr);
4030 
4031   // Remove shmid. If shmat() is successful, the actual shared memory segment
4032   // will be deleted when it's detached by shmdt() or when the process
4033   // terminates. If shmat() is not successful this will remove the shared
4034   // segment immediately.
4035   shmctl(shmid, IPC_RMID, NULL);
4036 
4037   return addr;
4038 }
4039 
4040 static void warn_on_commit_special_failure(char* req_addr, size_t bytes,
4041                                            size_t page_size, int error) {
4042   assert(error == ENOMEM, "Only expect to fail if no memory is available");
4043 
4044   bool warn_on_failure = UseLargePages &&
4045       (!FLAG_IS_DEFAULT(UseLargePages) ||
4046        !FLAG_IS_DEFAULT(UseHugeTLBFS) ||
4047        !FLAG_IS_DEFAULT(LargePageSizeInBytes));
4048 
4049   if (warn_on_failure) {
4050     char msg[128];
    jio_snprintf(msg, sizeof(msg), "Failed to reserve and commit memory. req_addr: "
                                   PTR_FORMAT " bytes: " SIZE_FORMAT " page size: "
                                   SIZE_FORMAT " (errno = %d).",
                                   p2i(req_addr), bytes, page_size, error);
4055     warning("%s", msg);
4056   }
4057 }
4058 
4059 bool os::Linux::commit_memory_special(size_t bytes,
4060                                       size_t page_size,
4061                                       char* req_addr,
4062                                       bool exec) {
4063   assert(UseLargePages && UseHugeTLBFS, "Should only get here when HugeTLBFS large pages are used");
4064   assert(is_aligned(bytes, page_size), "Unaligned size");
4065   assert(is_aligned(req_addr, page_size), "Unaligned address");
4066   assert(req_addr != NULL, "Must have a requested address for special mappings");
4067 
4068   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
4069   int flags = MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED;
4070 
4071   // For large pages additional flags are required.
4072   if (page_size > (size_t) os::vm_page_size()) {
4073     flags |= MAP_HUGETLB | hugetlbfs_page_size_flag(page_size);
4074   }
4075   char* addr = (char*)::mmap(req_addr, bytes, prot, flags, -1, 0);
4076 
4077   if (addr == MAP_FAILED) {
4078     warn_on_commit_special_failure(req_addr, bytes, page_size, errno);
4079     return false;
4080   }
4081 
4082   log_debug(pagesize)("Commit special mapping: " PTR_FORMAT ", size=" SIZE_FORMAT "%s, page size="
4083                       SIZE_FORMAT "%s",
4084                       p2i(addr), byte_size_in_exact_unit(bytes),
4085                       exact_unit_for_byte_size(bytes),
4086                       byte_size_in_exact_unit(page_size),
4087                       exact_unit_for_byte_size(page_size));
4088   assert(is_aligned(addr, page_size), "Must be");
4089   return true;
4090 }
4091 
4092 char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes,
4093                                                    size_t alignment,
4094                                                    size_t page_size,
4095                                                    char* req_addr,
4096                                                    bool exec) {
4097   assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
4098   assert(is_aligned(req_addr, alignment), "Must be");
4099   assert(is_aligned(req_addr, page_size), "Must be");
4100   assert(is_aligned(alignment, os::vm_allocation_granularity()), "Must be");
4101   assert(_page_sizes.contains(page_size), "Must be a valid page size");
4102   assert(page_size > (size_t)os::vm_page_size(), "Must be a large page size");
4103   assert(bytes >= page_size, "Shouldn't allocate large pages for small sizes");
4104 
4105   // We only end up here when at least 1 large page can be used.
4106   // If the size is not a multiple of the large page size, we
4107   // will mix the types of pages used, in descending order of page size.
4108   // Start off by reserving a range of the given size that is
4109   // properly aligned. At this point no pages are committed. If
4110   // a requested address is given it will be used and it must be
4111   // aligned to both the large page size and the given alignment.
4112   // The larger of the two will be used.
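       // As an illustrative example (sizes are hypothetical): a 9M request
       // with a 2M large page size reserves 9M here, then the code below
       // commits the first 8M with large pages and the trailing 1M with
       // small pages.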
4113   size_t required_alignment = MAX(page_size, alignment);
4114   char* const aligned_start = anon_mmap_aligned(req_addr, bytes, required_alignment);
4115   if (aligned_start == NULL) {
4116     return NULL;
4117   }
4118 
4119   // First commit using large pages.
4120   size_t large_bytes = align_down(bytes, page_size);
4121   bool large_committed = commit_memory_special(large_bytes, page_size, aligned_start, exec);
4122 
4123   if (large_committed && bytes == large_bytes) {
4124     // The commit succeeded and the size was large page aligned,
4125     // so no additional work is needed.
4126     return aligned_start;
4127   }
4128 
4129   // The requested size requires some small pages as well.
4130   char* small_start = aligned_start + large_bytes;
4131   size_t small_size = bytes - large_bytes;
4132   if (!large_committed) {
4133     // Failed to commit large pages, so we need to unmap the
4134     // remainder of the original reservation.
4135     ::munmap(small_start, small_size);
4136     return NULL;
4137   }
4138 
4139   // Commit the remaining bytes using small pages.
4140   bool small_committed = commit_memory_special(small_size, os::vm_page_size(), small_start, exec);
4141   if (!small_committed) {
4142     // Failed to commit the remaining size, need to unmap
4143     // the large pages part of the reservation.
4144     ::munmap(aligned_start, large_bytes);
4145     return NULL;
4146   }
4147   return aligned_start;
4148 }
4149 
4150 char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size,
4151                                     char* req_addr, bool exec) {
4152   assert(UseLargePages, "only for large pages");
4153 
4154   char* addr;
4155   if (UseSHM) {
4156     // No support for using specific page sizes with SHM.
4157     addr = os::Linux::reserve_memory_special_shm(bytes, alignment, req_addr, exec);
4158   } else {
4159     assert(UseHugeTLBFS, "must be");
4160     addr = os::Linux::reserve_memory_special_huge_tlbfs(bytes, alignment, page_size, req_addr, exec);
4161   }
4162 
4163   if (addr != NULL) {
4164     if (UseNUMAInterleaving) {
4165       numa_make_global(addr, bytes);
4166     }
4167   }
4168 
4169   return addr;
4170 }
4171 
4172 bool os::Linux::release_memory_special_shm(char* base, size_t bytes) {
4173   // detaching the SHM segment will also delete it, see reserve_memory_special_shm()
4174   return shmdt(base) == 0;
4175 }
4176 
4177 bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
4178   return pd_release_memory(base, bytes);
4179 }
4180 
4181 bool os::pd_release_memory_special(char* base, size_t bytes) {
4182   assert(UseLargePages, "only for large pages");
4183   bool res;
4184 
4185   if (UseSHM) {
4186     res = os::Linux::release_memory_special_shm(base, bytes);
4187   } else {
4188     assert(UseHugeTLBFS, "must be");
4189     res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
4190   }
4191   return res;
4192 }
4193 
4194 size_t os::large_page_size() {
4195   return _large_page_size;
4196 }
4197 
4198 // With SysV SHM the entire memory region must be allocated as shared
4199 // memory.
4200 // HugeTLBFS allows the application to commit large page memory on demand.
4201 // However, when committing memory with HugeTLBFS fails, the region
4202 // that was supposed to be committed loses the old reservation
4203 // and allows other threads to steal that memory region. Because of this
4204 // behavior we can't commit HugeTLBFS memory.
4205 bool os::can_commit_large_page_memory() {
4206   return UseTransparentHugePages;
4207 }
4208 
4209 bool os::can_execute_large_page_memory() {
4210   return UseTransparentHugePages || UseHugeTLBFS;
4211 }
4212 
4213 char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
4214   assert(file_desc >= 0, "file_desc is not valid");
4215   char* result = pd_attempt_reserve_memory_at(requested_addr, bytes, !ExecMem);
4216   if (result != NULL) {
4217     if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
4218       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
4219     }
4220   }
4221   return result;
4222 }
4223 
4224 // Reserve memory at an arbitrary address, only if that area is
4225 // available (and not reserved for something else).
4226 
4227 char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec) {
4228   // Assert only that the size is a multiple of the page size, since
4229   // that's all that mmap requires, and since that's all we really know
4230   // about at this low abstraction level.  If we need higher alignment,
4231   // we can either pass an alignment to this method or verify alignment
4232   // in one of the methods further up the call chain.  See bug 5044738.
4233   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
4234 
4235   // Repeatedly allocate blocks until the block is allocated at the
4236   // right spot.
4237 
4238   // Linux mmap allows the caller to pass an address as a hint; give it a try
4239   // first, and if the kernel honors the hint we can return immediately.
4240   char * addr = anon_mmap(requested_addr, bytes);
4241   if (addr == requested_addr) {
4242     return requested_addr;
4243   }
4244 
4245   if (addr != NULL) {
4246     // mmap() is successful but it fails to reserve at the requested address
4247     anon_munmap(addr, bytes);
4248   }
4249 
4250   return NULL;
4251 }
4252 
4253 // Used to convert frequent JVM_Yield() to nops
4254 bool os::dont_yield() {
4255   return DontYieldALot;
4256 }
4257 
4258 // The Linux CFS scheduler (since 2.6.23) does not guarantee sched_yield(2)
4259 // will actually give up the CPU. Since the skip buddy feature (v2.6.28):
4260 //
4261 // * Sets the yielding task as skip buddy for the current CPU's run queue.
4262 // * Picks the next task from the run queue; if empty, picks a skip buddy (can be the yielding task).
4263 // * Clears skip buddies for this run queue (yielding task no longer a skip buddy).
4264 //
4265 // An alternative is calling os::naked_short_nanosleep with a small number to avoid
4266 // getting re-scheduled immediately.
4267 //
4268 void os::naked_yield() {
4269   sched_yield();
4270 }
4271 
4272 ////////////////////////////////////////////////////////////////////////////////
4273 // thread priority support
4274 
4275 // Note: Normal Linux applications are run with SCHED_OTHER policy. SCHED_OTHER
4276 // only supports dynamic priority, static priority must be zero. For real-time
4277 // applications, Linux supports SCHED_RR which allows static priority (1-99).
4278 // However, for large multi-threaded applications, SCHED_RR is not only slower
4279 // than SCHED_OTHER, but also very unstable (my volano tests hang hard 4 out
4280 // of 5 runs - Sep 2005).
4281 //
4282 // The following code actually changes the niceness of kernel-thread/LWP. It
4283 // has an assumption that setpriority() only modifies one kernel-thread/LWP,
4284 // not the entire user process, and user level threads are 1:1 mapped to kernel
4285 // threads. It has always been the case, but could change in the future. For
4286 // this reason, the code should not be used as default (ThreadPriorityPolicy=0).
4287 // It is only used when ThreadPriorityPolicy=1 and may require system level permission
4288 // (e.g., root privilege or CAP_SYS_NICE capability).
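     // Note: on Linux a lower nice value means a higher scheduling priority,
     // so in the table below MaxPriority (10) maps to niceness -5 while
     // MinPriority (1) maps to niceness 4.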
4289 
4290 int os::java_to_os_priority[CriticalPriority + 1] = {
4291   19,              // 0 Entry should never be used
4292 
4293    4,              // 1 MinPriority
4294    3,              // 2
4295    2,              // 3
4296 
4297    1,              // 4
4298    0,              // 5 NormPriority
4299   -1,              // 6
4300 
4301   -2,              // 7
4302   -3,              // 8
4303   -4,              // 9 NearMaxPriority
4304 
4305   -5,              // 10 MaxPriority
4306 
4307   -5               // 11 CriticalPriority
4308 };
4309 
4310 static int prio_init() {
4311   if (ThreadPriorityPolicy == 1) {
4312     if (geteuid() != 0) {
4313       if (!FLAG_IS_DEFAULT(ThreadPriorityPolicy) && !FLAG_IS_JIMAGE_RESOURCE(ThreadPriorityPolicy)) {
4314         warning("-XX:ThreadPriorityPolicy=1 may require system level permission, " \
4315                 "e.g., being the root user. If the necessary permission is not " \
4316                 "possessed, changes to priority will be silently ignored.");
4317       }
4318     }
4319   }
4320   if (UseCriticalJavaThreadPriority) {
4321     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
4322   }
4323   return 0;
4324 }
4325 
4326 OSReturn os::set_native_priority(Thread* thread, int newpri) {
4327   if (!UseThreadPriorities || ThreadPriorityPolicy == 0) return OS_OK;
4328 
4329   int ret = setpriority(PRIO_PROCESS, thread->osthread()->thread_id(), newpri);
4330   return (ret == 0) ? OS_OK : OS_ERR;
4331 }
4332 
4333 OSReturn os::get_native_priority(const Thread* const thread,
4334                                  int *priority_ptr) {
4335   if (!UseThreadPriorities || ThreadPriorityPolicy == 0) {
4336     *priority_ptr = java_to_os_priority[NormPriority];
4337     return OS_OK;
4338   }
4339 
4340   errno = 0;
4341   *priority_ptr = getpriority(PRIO_PROCESS, thread->osthread()->thread_id());
4342   return (*priority_ptr != -1 || errno == 0 ? OS_OK : OS_ERR);
4343 }
4344 
4345 // This is the fastest way to get thread cpu time on Linux.
4346 // Returns cpu time (user+sys) for any thread, not only the current one.
4347 // POSIX compliant clocks are implemented in kernels 2.6.16+.
4348 // It might work on 2.6.10+ with a special kernel/glibc patch.
4349 // For reference, see IEEE Std 1003.1-2004:
4350 //   http://www.unix.org/single_unix_specification
4351 
4352 jlong os::Linux::fast_thread_cpu_time(clockid_t clockid) {
4353   struct timespec tp;
4354   int status = clock_gettime(clockid, &tp);
4355   assert(status == 0, "clock_gettime error: %s", os::strerror(errno));
4356   return (tp.tv_sec * NANOSECS_PER_SEC) + tp.tv_nsec;
4357 }
4358 
4359 // Determine if the vmid is the parent pid for a child in a PID namespace.
4360 // Return the namespace pid if so, otherwise -1.
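     // For reference, an illustrative /proc/<pid>/status line (not produced by
     // this code): a process in a PID namespace has an entry such as
     //   NSpid:  1234    1
     // where 1234 is the pid seen from the parent namespace and 1 is the pid
     // inside the namespace; a single value means no nested PID namespace.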
4361 int os::Linux::get_namespace_pid(int vmid) {
4362   char fname[24];
4363   int retpid = -1;
4364 
4365   snprintf(fname, sizeof(fname), "/proc/%d/status", vmid);
4366   FILE *fp = fopen(fname, "r");
4367 
4368   if (fp) {
4369     int pid, nspid;
4370     int ret;
4371     while (!feof(fp) && !ferror(fp)) {
4372       ret = fscanf(fp, "NSpid: %d %d", &pid, &nspid);
4373       if (ret == 1) {
4374         break;
4375       }
4376       if (ret == 2) {
4377         retpid = nspid;
4378         break;
4379       }
4380       for (;;) {
4381         int ch = fgetc(fp);
4382         if (ch == EOF || ch == (int)'\n') break;
4383       }
4384     }
4385     fclose(fp);
4386   }
4387   return retpid;
4388 }
4389 
4390 extern void report_error(char* file_name, int line_no, char* title,
4391                          char* format, ...);
4392 
4393 // Some Linux distributions (notably Alpine Linux) include
4394 // grsecurity in the kernel. Of particular interest from a JVM perspective
4395 // is PaX (https://pax.grsecurity.net/), which adds some security features
4396 // related to page attributes. Specifically, the MPROTECT PaX functionality
4397 // (https://pax.grsecurity.net/docs/mprotect.txt) prevents dynamic
4398 // code generation by disallowing a (previously) writable page to be
4399 // marked as executable. This is, of course, exactly what HotSpot does
4400 // for JIT compiled methods, as well as for stubs, adapters, etc.
4401 //
4402 // Instead of crashing "lazily" when trying to make a page executable,
4403 // this code probes for the presence of PaX and reports the failure
4404 // eagerly.
4405 static void check_pax(void) {
4406   // Zero doesn't generate code dynamically, so no need to perform the PaX check
4407 #ifndef ZERO
4408   size_t size = os::Linux::page_size();
4409 
4410   void* p = ::mmap(NULL, size, PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
4411   if (p == MAP_FAILED) {
4412     log_debug(os)("os_linux.cpp: check_pax: mmap failed (%s)", os::strerror(errno));
4413     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "failed to allocate memory for PaX check.");
4414   }
4415 
4416   int res = ::mprotect(p, size, PROT_WRITE|PROT_EXEC);
4417   if (res == -1) {
4418     log_debug(os)("os_linux.cpp: check_pax: mprotect failed (%s)", os::strerror(errno));
4419     vm_exit_during_initialization(
4420       "Failed to mark memory page as executable - check if grsecurity/PaX is enabled");
4421   }
4422 
4423   ::munmap(p, size);
4424 #endif
4425 }
4426 
4427 // this is called _before_ most of the global arguments have been parsed
4428 void os::init(void) {
4429   char dummy;   // used to get a guess on initial stack address
4430 
4431   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
4432 
4433   Linux::set_page_size(sysconf(_SC_PAGESIZE));
4434   if (Linux::page_size() == -1) {
4435     fatal("os_linux.cpp: os::init: sysconf failed (%s)",
4436           os::strerror(errno));
4437   }
4438   _page_sizes.add(Linux::page_size());
4439 
4440   Linux::initialize_system_info();
4441 
4442 #ifdef __GLIBC__
4443   Linux::_mallinfo = CAST_TO_FN_PTR(Linux::mallinfo_func_t, dlsym(RTLD_DEFAULT, "mallinfo"));
4444   Linux::_mallinfo2 = CAST_TO_FN_PTR(Linux::mallinfo2_func_t, dlsym(RTLD_DEFAULT, "mallinfo2"));
4445 #endif // __GLIBC__
4446 
4447   os::Linux::CPUPerfTicks pticks;
4448   bool res = os::Linux::get_tick_information(&pticks, -1);
4449 
4450   if (res && pticks.has_steal_ticks) {
4451     has_initial_tick_info = true;
4452     initial_total_ticks = pticks.total;
4453     initial_steal_ticks = pticks.steal;
4454   }
4455 
4456   // _main_thread points to the thread that created/loaded the JVM.
4457   Linux::_main_thread = pthread_self();
4458 
4459   // retrieve entry point for pthread_setname_np
4460   Linux::_pthread_setname_np =
4461     (int(*)(pthread_t, const char*))dlsym(RTLD_DEFAULT, "pthread_setname_np");
4462 
4463   check_pax();
4464 
4465   os::Posix::init();
4466 
4467   initial_time_count = javaTimeNanos();
4468 }
4469 
4470 // To install functions for atexit system call
4471 extern "C" {
4472   static void perfMemory_exit_helper() {
4473     perfMemory_exit();
4474   }
4475 }
4476 
4477 void os::pd_init_container_support() {
4478   OSContainer::init();
4479 }
4480 
4481 void os::Linux::numa_init() {
4482 
4483   // Java can be invoked:
4484   // 1. Without numactl: the heap will be allocated/configured on all nodes
4485   //    as per the system policy.
4486   // 2. With numactl --interleave:
4487   //      Use the numa_get_interleave_mask(v2) API to get the nodes bitmask.
4488   //      In the membind case the same API returns an empty (reset) bitmask.
4489   //      Interleave is only a hint and the kernel can fall back to other
4490   //      nodes if no memory is available on the target nodes.
4491   // 3. With numactl --membind:
4492   //      Use the numa_get_membind(v2) API to get the nodes bitmask. In the
4493   //      interleave case the same API returns a bitmask of all nodes.
4494   // numa_all_nodes_ptr holds the bitmask of all nodes.
4495   // numa_get_interleave_mask(v2) and numa_get_membind(v2) return the correct
4496   // bitmask when externally configured to run on all or fewer nodes.
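       // For example (an illustrative invocation, not taken from this file):
       //   numactl --interleave=0,1 java -XX:+UseNUMA ...
       // makes numa_get_interleave_mask(v2) return a bitmask with nodes 0 and 1
       // set, and the heap is then configured on those two nodes.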
4497 
4498   if (!Linux::libnuma_init()) {
4499     FLAG_SET_ERGO(UseNUMA, false);
4500     FLAG_SET_ERGO(UseNUMAInterleaving, false); // Also depends on libnuma.
4501   } else {
4502     if ((Linux::numa_max_node() < 1) || Linux::is_bound_to_single_node()) {
4503       // If there's only one node (they start from 0) or if the process
4504       // is bound explicitly to a single node using membind, disable NUMA
4505       UseNUMA = false;
4506     } else {
4507       LogTarget(Info,os) log;
4508       LogStream ls(log);
4509 
4510       Linux::set_configured_numa_policy(Linux::identify_numa_policy());
4511 
4512       struct bitmask* bmp = Linux::_numa_membind_bitmask;
4513       const char* numa_mode = "membind";
4514 
4515       if (Linux::is_running_in_interleave_mode()) {
4516         bmp = Linux::_numa_interleave_bitmask;
4517         numa_mode = "interleave";
4518       }
4519 
4520       ls.print("UseNUMA is enabled and invoked in '%s' mode."
4521                " Heap will be configured using NUMA memory nodes:", numa_mode);
4522 
4523       for (int node = 0; node <= Linux::numa_max_node(); node++) {
4524         if (Linux::_numa_bitmask_isbitset(bmp, node)) {
4525           ls.print(" %d", node);
4526         }
4527       }
4528     }
4529   }
4530 
4531   // When NUMA is requested, non-NUMA-aware allocations default to interleaving.
4532   if (UseNUMA && !UseNUMAInterleaving) {
4533     FLAG_SET_ERGO_IF_DEFAULT(UseNUMAInterleaving, true);
4534   }
4535 
4536   if (UseParallelGC && UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
4537     // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
4538     // we can make the adaptive lgrp chunk resizing work. If the user specified both
4539     // UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn
4540     // and disable adaptive resizing.
4541     if (UseAdaptiveSizePolicy || UseAdaptiveNUMAChunkSizing) {
4542       warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, "
4543               "disabling adaptive resizing (-XX:-UseAdaptiveSizePolicy -XX:-UseAdaptiveNUMAChunkSizing)");
4544       UseAdaptiveSizePolicy = false;
4545       UseAdaptiveNUMAChunkSizing = false;
4546     }
4547   }
4548 }
4549 
4550 // this is called _after_ the global arguments have been parsed
4551 jint os::init_2(void) {
4552 
4553   // This could be set after os::Posix::init() but all platforms
4554   // have to set it the same so we have to mirror Solaris.
4555   DEBUG_ONLY(os::set_mutex_init_done();)
4556 
4557   os::Posix::init_2();
4558 
4559   Linux::fast_thread_clock_init();
4560 
4561   if (PosixSignals::init() == JNI_ERR) {
4562     return JNI_ERR;
4563   }
4564 
4565   if (AdjustStackSizeForTLS) {
4566     get_minstack_init();
4567   }
4568 
4569   // Check and set minimum stack sizes against command line options.
4570   if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
4571     return JNI_ERR;
4572   }
4573 
4574 #if defined(IA32) && !defined(ZERO)
4575   // Need to ensure we've determined the process's initial stack to
4576   // perform the workaround
4577   Linux::capture_initial_stack(JavaThread::stack_size_at_create());
4578   workaround_expand_exec_shield_cs_limit();
4579 #else
4580   suppress_primordial_thread_resolution = Arguments::created_by_java_launcher();
4581   if (!suppress_primordial_thread_resolution) {
4582     Linux::capture_initial_stack(JavaThread::stack_size_at_create());
4583   }
4584 #endif
4585 
4586   Linux::libpthread_init();
4587   Linux::sched_getcpu_init();
4588   log_info(os)("HotSpot is running with %s, %s",
4589                Linux::libc_version(), Linux::libpthread_version());
4590 
4591   if (UseNUMA || UseNUMAInterleaving) {
4592     Linux::numa_init();
4593   }
4594 
4595   if (MaxFDLimit) {
4596     // set the number of file descriptors to max. print out error
4597     // if getrlimit/setrlimit fails but continue regardless.
4598     struct rlimit nbr_files;
4599     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
4600     if (status != 0) {
4601       log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
4602     } else {
4603       nbr_files.rlim_cur = nbr_files.rlim_max;
4604       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
4605       if (status != 0) {
4606         log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
4607       }
4608     }
4609   }
4610 
4611   // at-exit methods are called in the reverse order of their registration.
4612   // atexit functions are called on return from main or as a result of a
4613   // call to exit(3). At least 32 of these functions can be registered,
4614   // and atexit() does not set errno.
4615 
4616   if (PerfAllowAtExitRegistration) {
4617     // only register atexit functions if PerfAllowAtExitRegistration is set.
4618     // atexit functions can be delayed until process exit time, which
4619     // can be problematic for embedded VM situations. Embedded VMs should
4620     // call DestroyJavaVM() to assure that VM resources are released.
4621 
4622     // note: perfMemory_exit_helper atexit function may be removed in
4623     // the future if the appropriate cleanup code can be added to the
4624     // VM_Exit VMOperation's doit method.
4625     if (atexit(perfMemory_exit_helper) != 0) {
4626       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4627     }
4628   }
4629 
4630   // initialize thread priority policy
4631   prio_init();
4632 
4633   if (!FLAG_IS_DEFAULT(AllocateHeapAt)) {
4634     set_coredump_filter(DAX_SHARED_BIT);
4635   }
4636 
4637   if (DumpPrivateMappingsInCore) {
4638     set_coredump_filter(FILE_BACKED_PVT_BIT);
4639   }
4640 
4641   if (DumpSharedMappingsInCore) {
4642     set_coredump_filter(FILE_BACKED_SHARED_BIT);
4643   }
4644 
4645   if (DumpPerfMapAtExit && FLAG_IS_DEFAULT(UseCodeCacheFlushing)) {
4646     // Disable code cache flushing to ensure the map file written at
4647     // exit contains all nmethods generated during execution.
4648     FLAG_SET_DEFAULT(UseCodeCacheFlushing, false);
4649   }
4650 
4651   return JNI_OK;
4652 }
4653 
4654 // older glibc versions don't have this macro (which expands to
4655 // an optimized bit-counting function) so we have to roll our own
4656 #ifndef CPU_COUNT
4657 
4658 static int _cpu_count(const cpu_set_t* cpus) {
4659   int count = 0;
4660   // only look up to the number of configured processors
4661   for (int i = 0; i < os::processor_count(); i++) {
4662     if (CPU_ISSET(i, cpus)) {
4663       count++;
4664     }
4665   }
4666   return count;
4667 }
4668 
4669 #define CPU_COUNT(cpus) _cpu_count(cpus)
4670 
4671 #endif // CPU_COUNT
4672 
4673 // Get the current number of available processors for this process.
4674 // This value can change at any time during a process's lifetime.
4675 // sched_getaffinity gives an accurate answer as it accounts for cpusets.
4676 // If it appears there may be more than 1024 processors then we do a
4677 // dynamic check - see 6515172 for details.
4678 // If anything goes wrong we fall back to returning the number of online
4679 // processors - which can be greater than the number available to the process.
4680 static int get_active_processor_count() {
4681   // Note: keep this function, with its CPU_xx macros, *outside* the os namespace (see JDK-8289477).
4682   cpu_set_t cpus;  // can represent at most 1024 (CPU_SETSIZE) processors
4683   cpu_set_t* cpus_p = &cpus;
4684   int cpus_size = sizeof(cpu_set_t);
4685 
4686   int configured_cpus = os::processor_count();  // upper bound on available cpus
4687   int cpu_count = 0;
4688 
4689 // old build platforms may not support dynamic cpu sets
4690 #ifdef CPU_ALLOC
4691 
4692   // To enable easy testing of the dynamic path on different platforms we
4693   // introduce a diagnostic flag: UseCpuAllocPath
4694   if (configured_cpus >= CPU_SETSIZE || UseCpuAllocPath) {
4695     // kernel may use a mask bigger than cpu_set_t
4696     log_trace(os)("active_processor_count: using dynamic path %s"
4697                   "- configured processors: %d",
4698                   UseCpuAllocPath ? "(forced) " : "",
4699                   configured_cpus);
4700     cpus_p = CPU_ALLOC(configured_cpus);
4701     if (cpus_p != NULL) {
4702       cpus_size = CPU_ALLOC_SIZE(configured_cpus);
4703       // zero it just to be safe
4704       CPU_ZERO_S(cpus_size, cpus_p);
4705     }
4706     else {
4707        // failed to allocate so fallback to online cpus
4708        int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
4709        log_trace(os)("active_processor_count: "
4710                      "CPU_ALLOC failed (%s) - using "
4711                      "online processor count: %d",
4712                      os::strerror(errno), online_cpus);
4713        return online_cpus;
4714     }
4715   }
4716   else {
4717     log_trace(os)("active_processor_count: using static path - configured processors: %d",
4718                   configured_cpus);
4719   }
4720 #else // CPU_ALLOC
4721 // these stubs won't be executed
4722 #define CPU_COUNT_S(size, cpus) -1
4723 #define CPU_FREE(cpus)
4724 
4725   log_trace(os)("active_processor_count: only static path available - configured processors: %d",
4726                 configured_cpus);
4727 #endif // CPU_ALLOC
4728 
4729   // pid 0 means the current thread - which we have to assume represents the process
4730   if (sched_getaffinity(0, cpus_size, cpus_p) == 0) {
4731     if (cpus_p != &cpus) { // can only be true when CPU_ALLOC used
4732       cpu_count = CPU_COUNT_S(cpus_size, cpus_p);
4733     }
4734     else {
4735       cpu_count = CPU_COUNT(cpus_p);
4736     }
4737     log_trace(os)("active_processor_count: sched_getaffinity processor count: %d", cpu_count);
4738   }
4739   else {
4740     cpu_count = ::sysconf(_SC_NPROCESSORS_ONLN);
4741     warning("sched_getaffinity failed (%s) - using online processor count (%d) "
4742             "which may exceed available processors", os::strerror(errno), cpu_count);
4743   }
4744 
4745   if (cpus_p != &cpus) { // can only be true when CPU_ALLOC used
4746     CPU_FREE(cpus_p);
4747   }
4748 
4749   assert(cpu_count > 0 && cpu_count <= os::processor_count(), "sanity check");
4750   return cpu_count;
4751 }
4752 
4753 int os::Linux::active_processor_count() {
4754   return get_active_processor_count();
4755 }
4756 
4757 // Determine the active processor count from one of
4758 // three different sources:
4759 //
4760 // 1. User option -XX:ActiveProcessorCount
4761 // 2. kernel os calls (sched_getaffinity or sysconf(_SC_NPROCESSORS_ONLN))
4762 // 3. extracted from cgroup cpu subsystem (shares and quotas)
4763 //
4764 // Option 1, if specified, will always override.
4765 // If the cgroup subsystem is active and configured, we
4766 // will return the min of the cgroup and option 2 results.
4767 // This is required since tools, such as numactl, that
4768 // alter cpu affinity do not update cgroup subsystem
4769 // cpuset configuration files.
4770 int os::active_processor_count() {
4771   // User has overridden the number of active processors
4772   if (ActiveProcessorCount > 0) {
4773     log_trace(os)("active_processor_count: "
4774                   "active processor count set by user : %d",
4775                   ActiveProcessorCount);
4776     return ActiveProcessorCount;
4777   }
4778 
4779   int active_cpus;
4780   if (OSContainer::is_containerized()) {
4781     active_cpus = OSContainer::active_processor_count();
4782     log_trace(os)("active_processor_count: determined by OSContainer: %d",
4783                    active_cpus);
4784   } else {
4785     active_cpus = os::Linux::active_processor_count();
4786   }
4787 
4788   return active_cpus;
4789 }
4790 
4791 static bool should_warn_invalid_processor_id() {
4792   if (os::processor_count() == 1) {
4793     // Don't warn if we only have one processor
4794     return false;
4795   }
4796 
4797   static volatile int warn_once = 1;
4798 
4799   if (Atomic::load(&warn_once) == 0 ||
4800       Atomic::xchg(&warn_once, 0) == 0) {
4801     // Don't warn more than once
4802     return false;
4803   }
4804 
4805   return true;
4806 }
4807 
4808 uint os::processor_id() {
4809   const int id = Linux::sched_getcpu();
4810 
4811   if (id < processor_count()) {
4812     return (uint)id;
4813   }
4814 
4815   // Some environments (e.g. openvz containers and the rr debugger) incorrectly
4816   // report a processor id that is higher than the number of processors available.
4817   // This is problematic, for example, when implementing CPU-local data structures,
4818   // where the processor id is used to index into an array of length processor_count().
4819 // If this happens we return 0 here. This is safe since we always have at least
4820   // one processor, but it's not optimal for performance if we're actually executing
4821   // in an environment with more than one processor.
4822   if (should_warn_invalid_processor_id()) {
4823     log_warning(os)("Invalid processor id reported by the operating system "
4824                     "(got processor id %d, valid processor id range is 0-%d)",
4825                     id, processor_count() - 1);
4826     log_warning(os)("Falling back to assuming processor id is 0. "
4827                     "This could have a negative impact on performance.");
4828   }
4829 
4830   return 0;
4831 }
4832 
4833 void os::set_native_thread_name(const char *name) {
4834   if (Linux::_pthread_setname_np) {
4835     char buf[16]; // according to the glibc manpage, 16 chars incl. '\0'
4836     snprintf(buf, sizeof(buf), "%s", name);
4837     buf[sizeof(buf) - 1] = '\0';
4838     const int rc = Linux::_pthread_setname_np(pthread_self(), buf);
4839     // ERANGE should not happen; all other errors should just be ignored.
4840     assert(rc != ERANGE, "pthread_setname_np failed");
4841   }
4842 }
4843 
4844 bool os::bind_to_processor(uint processor_id) {
4845   // Not yet implemented.
4846   return false;
4847 }
4848 
4849 ////////////////////////////////////////////////////////////////////////////////
4850 // debug support
4851 
4852 bool os::find(address addr, outputStream* st) {
4853   Dl_info dlinfo;
4854   memset(&dlinfo, 0, sizeof(dlinfo));
4855   if (dladdr(addr, &dlinfo) != 0) {
4856     st->print(PTR_FORMAT ": ", p2i(addr));
4857     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
4858       st->print("%s+" PTR_FORMAT, dlinfo.dli_sname,
4859                 p2i(addr) - p2i(dlinfo.dli_saddr));
4860     } else if (dlinfo.dli_fbase != NULL) {
4861       st->print("<offset " PTR_FORMAT ">", p2i(addr) - p2i(dlinfo.dli_fbase));
4862     } else {
4863       st->print("<absolute address>");
4864     }
4865     if (dlinfo.dli_fname != NULL) {
4866       st->print(" in %s", dlinfo.dli_fname);
4867     }
4868     if (dlinfo.dli_fbase != NULL) {
4869       st->print(" at " PTR_FORMAT, p2i(dlinfo.dli_fbase));
4870     }
4871     st->cr();
4872 
4873     if (Verbose) {
4874       // decode some bytes around the PC
4875       address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
4876       address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
4877       address       lowest = (address) dlinfo.dli_sname;
4878       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
4879       if (begin < lowest)  begin = lowest;
4880       Dl_info dlinfo2;
4881       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
4882           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) {
4883         end = (address) dlinfo2.dli_saddr;
4884       }
4885       Disassembler::decode(begin, end, st);
4886     }
4887     return true;
4888   }
4889   return false;
4890 }
4891 
4892 ////////////////////////////////////////////////////////////////////////////////
4893 // misc
4894 
4895 // This does not do anything on Linux. This is basically a hook for being
4896 // able to use structured exception handling (thread-local exception filters)
4897 // on, e.g., Win32.
4898 void
4899 os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
4900                          JavaCallArguments* args, JavaThread* thread) {
4901   f(value, method, args, thread);
4902 }
4903 
4904 // This code originates from JDK's sysOpen and open64_w
4905 // from src/solaris/hpi/src/system_md.c
4906 
4907 int os::open(const char *path, int oflag, int mode) {
4908   if (strlen(path) > MAX_PATH - 1) {
4909     errno = ENAMETOOLONG;
4910     return -1;
4911   }
4912 
4913   // All file descriptors that are opened in the Java process and not
4914   // specifically destined for a subprocess should have the close-on-exec
4915   // flag set.  If we don't set it, then careless 3rd party native code
4916   // might fork and exec without closing all appropriate file descriptors
4917   // (e.g. as we do in closeDescriptors in UNIXProcess.c), and this in
4918   // turn might:
4919   //
4920   // - cause end-of-file to fail to be detected on some file
4921   //   descriptors, resulting in mysterious hangs, or
4922   //
4923   // - might cause an fopen in the subprocess to fail on a system
4924   //   suffering from bug 1085341.
4925   //
4926   // (Yes, the default setting of the close-on-exec flag is a Unix
4927   // design flaw)
4928   //
4929   // See:
4930   // 1085341: 32-bit stdio routines should support file descriptors >255
4931   // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4932   // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4933   //
4934   // Modern Linux kernels (since 2.6.23, 2007) support O_CLOEXEC with open().
4935   // O_CLOEXEC is preferable to using FD_CLOEXEC on an open file descriptor
4936   // because it saves a system call and removes a small window where the flag
4937   // is unset.  On ancient Linux kernels the O_CLOEXEC flag will be ignored
4938   // and we fall back to using FD_CLOEXEC (see below).
4939 #ifdef O_CLOEXEC
4940   oflag |= O_CLOEXEC;
4941 #endif
4942 
4943   int fd = ::open64(path, oflag, mode);
4944   if (fd == -1) return -1;
4945 
4946   // If the open succeeded, the file might still be a directory.
4947   {
4948     struct stat64 buf64;
4949     int ret = ::fstat64(fd, &buf64);
4950     int st_mode = buf64.st_mode;
4951 
4952     if (ret != -1) {
4953       if ((st_mode & S_IFMT) == S_IFDIR) {
4954         errno = EISDIR;
4955         ::close(fd);
4956         return -1;
4957       }
4958     } else {
4959       ::close(fd);
4960       return -1;
4961     }
4962   }
4963 
4964 #ifdef FD_CLOEXEC
4965   // Validate that the use of the O_CLOEXEC flag on open above worked.
4966   // With recent kernels, we will perform this check exactly once.
4967   static sig_atomic_t O_CLOEXEC_is_known_to_work = 0;
4968   if (!O_CLOEXEC_is_known_to_work) {
4969     int flags = ::fcntl(fd, F_GETFD);
4970     if (flags != -1) {
4971       if ((flags & FD_CLOEXEC) != 0)
4972         O_CLOEXEC_is_known_to_work = 1;
4973       else
4974         ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4975     }
4976   }
4977 #endif
4978 
4979   return fd;
4980 }
4981 
4982 
4983 // create binary file, rewriting existing file if required
4984 int os::create_binary_file(const char* path, bool rewrite_existing) {
4985   int oflags = O_WRONLY | O_CREAT;
4986   oflags |= rewrite_existing ? O_TRUNC : O_EXCL;
4987   return ::open64(path, oflags, S_IREAD | S_IWRITE);
4988 }
4989 
4990 // return current position of file pointer
4991 jlong os::current_file_offset(int fd) {
4992   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4993 }
4994 
4995 // move file pointer to the specified offset
4996 jlong os::seek_to_file_offset(int fd, jlong offset) {
4997   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4998 }
4999 
5000 // This code originates from JDK's sysAvailable
5001 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c
5002 
5003 int os::available(int fd, jlong *bytes) {
5004   jlong cur, end;
5005   int mode;
5006   struct stat64 buf64;
5007 
5008   if (::fstat64(fd, &buf64) >= 0) {
5009     mode = buf64.st_mode;
5010     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
5011       int n;
5012       if (::ioctl(fd, FIONREAD, &n) >= 0) {
5013         *bytes = n;
5014         return 1;
5015       }
5016     }
5017   }
5018   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
5019     return 0;
5020   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
5021     return 0;
5022   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
5023     return 0;
5024   }
5025   *bytes = end - cur;
5026   return 1;
5027 }
5028 
5029 // Map a block of memory.
5030 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
5031                         char *addr, size_t bytes, bool read_only,
5032                         bool allow_exec) {
5033   int prot;
5034   int flags = MAP_PRIVATE;
5035 
5036   if (read_only) {
5037     prot = PROT_READ;
5038   } else {
5039     prot = PROT_READ | PROT_WRITE;
5040   }
5041 
5042   if (allow_exec) {
5043     prot |= PROT_EXEC;
5044   }
5045 
5046   if (addr != NULL) {
5047     flags |= MAP_FIXED;
5048   }
5049 
5050   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
5051                                      fd, file_offset);
5052   if (mapped_address == MAP_FAILED) {
5053     return NULL;
5054   }
5055   return mapped_address;
5056 }
5057 
5058 
5059 // Remap a block of memory.
5060 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5061                           char *addr, size_t bytes, bool read_only,
5062                           bool allow_exec) {
5063   // same as map_memory() on this OS
5064   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5065                         allow_exec);
5066 }
5067 
5068 
5069 // Unmap a block of memory.
5070 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5071   return munmap(addr, bytes) == 0;
5072 }
5073 
5074 static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time);
5075 
5076 static jlong fast_cpu_time(Thread *thread) {
5077   clockid_t clockid;
5078   int rc = os::Linux::pthread_getcpuclockid(thread->osthread()->pthread_id(),
5079                                             &clockid);
5080   if (rc == 0) {
5081     return os::Linux::fast_thread_cpu_time(clockid);
5082   } else {
5083     // It's possible to encounter a terminated native thread that failed
5084     // to detach itself from the VM - which should result in ESRCH.
5085     assert_status(rc == ESRCH, rc, "pthread_getcpuclockid failed");
5086     return -1;
5087   }
5088 }
5089 
5090 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5091 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5092 // of a thread.
5093 //
5094 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
5095 // the fast estimate available on the platform.
5096 
5097 jlong os::current_thread_cpu_time() {
5098   if (os::Linux::supports_fast_thread_cpu_time()) {
5099     return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
5100   } else {
5101     // return user + sys since the cost is the same
5102     return slow_thread_cpu_time(Thread::current(), true /* user + sys */);
5103   }
5104 }
5105 
5106 jlong os::thread_cpu_time(Thread* thread) {
5107   // consistent with what current_thread_cpu_time() returns
5108   if (os::Linux::supports_fast_thread_cpu_time()) {
5109     return fast_cpu_time(thread);
5110   } else {
5111     return slow_thread_cpu_time(thread, true /* user + sys */);
5112   }
5113 }
5114 
5115 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5116   if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) {
5117     return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
5118   } else {
5119     return slow_thread_cpu_time(Thread::current(), user_sys_cpu_time);
5120   }
5121 }
5122 
5123 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5124   if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) {
5125     return fast_cpu_time(thread);
5126   } else {
5127     return slow_thread_cpu_time(thread, user_sys_cpu_time);
5128   }
5129 }
5130 
5131 //  -1 on error.
5132 static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5133   pid_t  tid = thread->osthread()->thread_id();
5134   char *s;
5135   char stat[2048];
5136   int statlen;
5137   char proc_name[64];
5138   int count;
5139   long sys_time, user_time;
5140   char cdummy;
5141   int idummy;
5142   long ldummy;
5143   FILE *fp;
5144 
5145   snprintf(proc_name, 64, "/proc/self/task/%d/stat", tid);
5146   fp = fopen(proc_name, "r");
5147   if (fp == NULL) return -1;
5148   statlen = fread(stat, 1, 2047, fp);
5149   stat[statlen] = '\0';
5150   fclose(fp);
5151 
5152   // Skip pid and the command string. Note that we could be dealing with
5153   // weird command names, e.g. user could decide to rename java launcher
5154   // to "java 1.4.2 :)", then the stat file would look like
5155   //                1234 (java 1.4.2 :)) R ... ...
5156   // We don't really need to know the command string, just find the last
5157   // occurrence of ")" and then start parsing from there. See bug 4726580.
5158   s = strrchr(stat, ')');
5159   if (s == NULL) return -1;
5160 
5161   // Skip blank chars
5162   do { s++; } while (isspace(*s));
5163 
5164   count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
5165                  &cdummy, &idummy, &idummy, &idummy, &idummy, &idummy,
5166                  &ldummy, &ldummy, &ldummy, &ldummy, &ldummy,
5167                  &user_time, &sys_time);
5168   if (count != 13) return -1;
5169   if (user_sys_cpu_time) {
5170     return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec);
5171   } else {
5172     return (jlong)user_time * (1000000000 / clock_tics_per_sec);
5173   }
5174 }
5175 
5176 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5177   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
5178   info_ptr->may_skip_backward = false;     // elapsed time not wall time
5179   info_ptr->may_skip_forward = false;      // elapsed time not wall time
5180   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
5181 }
5182 
5183 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5184   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
5185   info_ptr->may_skip_backward = false;     // elapsed time not wall time
5186   info_ptr->may_skip_forward = false;      // elapsed time not wall time
5187   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
5188 }
5189 
5190 bool os::is_thread_cpu_time_supported() {
5191   return true;
5192 }
5193 
5194 // System loadavg support.  Returns -1 if load average cannot be obtained.
5195 // Linux doesn't yet have an (official) notion of processor sets,
5196 // so just return the system wide load average.
5197 int os::loadavg(double loadavg[], int nelem) {
5198   return ::getloadavg(loadavg, nelem);
5199 }
5200 
5201 void os::pause() {
5202   char filename[MAX_PATH];
5203   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5204     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5205   } else {
5206     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5207   }
5208 
5209   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5210   if (fd != -1) {
5211     struct stat buf;
5212     ::close(fd);
5213     while (::stat(filename, &buf) == 0) {
5214       (void)::poll(NULL, 0, 100);
5215     }
5216   } else {
5217     jio_fprintf(stderr,
5218                 "Could not open pause file '%s', continuing immediately.\n", filename);
5219   }
5220 }
5221 
5222 // Get the default path to the core file
5223 // Returns the length of the string
5224 int os::get_core_path(char* buffer, size_t bufferSize) {
5225   /*
5226    * Max length of /proc/sys/kernel/core_pattern is 128 characters.
5227    * See https://www.kernel.org/doc/Documentation/sysctl/kernel.txt
5228    */
5229   const int core_pattern_len = 129;
5230   char core_pattern[core_pattern_len] = {0};
5231 
5232   int core_pattern_file = ::open("/proc/sys/kernel/core_pattern", O_RDONLY);
5233   if (core_pattern_file == -1) {
5234     return -1;
5235   }
5236 
5237   ssize_t ret = ::read(core_pattern_file, core_pattern, core_pattern_len);
5238   ::close(core_pattern_file);
5239   if (ret <= 0 || ret >= core_pattern_len || core_pattern[0] == '\n') {
5240     return -1;
5241   }
5242   if (core_pattern[ret-1] == '\n') {
5243     core_pattern[ret-1] = '\0';
5244   } else {
5245     core_pattern[ret] = '\0';
5246   }
5247 
5248   // Replace the %p in the core pattern with the process id. NOTE: we do this
5249   // only if the pattern doesn't start with "|", and we support only one %p in
5250   // the pattern.
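       // Illustrative examples (hypothetical patterns, not read from any system):
       //   "/var/cores/core.%p" with pid 1234 expands to "/var/cores/core.1234";
       //   "|/usr/lib/systemd/systemd-coredump ..." starts with '|' and is
       //   reported as a pipe to a helper program rather than expanded.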
5251   char *pid_pos = strstr(core_pattern, "%p");
5252   const char* tail = (pid_pos != NULL) ? (pid_pos + 2) : "";  // skip over the "%p"
5253   int written;
5254 
5255   if (core_pattern[0] == '/') {
5256     if (pid_pos != NULL) {
5257       *pid_pos = '\0';
5258       written = jio_snprintf(buffer, bufferSize, "%s%d%s", core_pattern,
5259                              current_process_id(), tail);
5260     } else {
5261       written = jio_snprintf(buffer, bufferSize, "%s", core_pattern);
5262     }
5263   } else {
5264     char cwd[PATH_MAX];
5265 
5266     const char* p = get_current_directory(cwd, PATH_MAX);
5267     if (p == NULL) {
5268       return -1;
5269     }
5270 
5271     if (core_pattern[0] == '|') {
5272       written = jio_snprintf(buffer, bufferSize,
5273                              "\"%s\" (or dumping to %s/core.%d)",
5274                              &core_pattern[1], p, current_process_id());
5275     } else if (pid_pos != NULL) {
5276       *pid_pos = '\0';
5277       written = jio_snprintf(buffer, bufferSize, "%s/%s%d%s", p, core_pattern,
5278                              current_process_id(), tail);
5279     } else {
5280       written = jio_snprintf(buffer, bufferSize, "%s/%s", p, core_pattern);
5281     }
5282   }
5283 
5284   if (written < 0) {
5285     return -1;
5286   }
5287 
5288   if (((size_t)written < bufferSize) && (pid_pos == NULL) && (core_pattern[0] != '|')) {
5289     int core_uses_pid_file = ::open("/proc/sys/kernel/core_uses_pid", O_RDONLY);
5290 
5291     if (core_uses_pid_file != -1) {
5292       char core_uses_pid = 0;
5293       ssize_t ret = ::read(core_uses_pid_file, &core_uses_pid, 1);
5294       ::close(core_uses_pid_file);
5295 
5296       if (core_uses_pid == '1') {
5297         jio_snprintf(buffer + written, bufferSize - written,
5298                                           ".%d", current_process_id());
5299       }
5300     }
5301   }
5302 
5303   return strlen(buffer);
5304 }
5305 
5306 bool os::start_debugging(char *buf, int buflen) {
5307   int len = (int)strlen(buf);
5308   char *p = &buf[len];
5309 
5310   jio_snprintf(p, buflen-len,
5311                "\n\n"
5312                "Do you want to debug the problem?\n\n"
5313                "To debug, run 'gdb /proc/%d/exe %d'; then switch to thread " UINTX_FORMAT " (" INTPTR_FORMAT ")\n"
5314                "Enter 'yes' to launch gdb automatically (PATH must include gdb)\n"
5315                "Otherwise, press RETURN to abort...",
5316                os::current_process_id(), os::current_process_id(),
5317                os::current_thread_id(), os::current_thread_id());
5318 
5319   bool yes = os::message_box("Unexpected Error", buf);
5320 
5321   if (yes) {
5322     // yes, user asked VM to launch debugger
5323     jio_snprintf(buf, sizeof(char)*buflen, "gdb /proc/%d/exe %d",
5324                  os::current_process_id(), os::current_process_id());
5325 
5326     os::fork_and_exec(buf);
5327     yes = false;
5328   }
5329   return yes;
5330 }
5331 
5332 
5333 // Java/Compiler thread:
5334 //
5335 //   Low memory addresses
5336 // P0 +------------------------+
5337 //    |                        |\  Java thread created by VM does not have glibc
5338 //    |    glibc guard page    | - guard page, attached Java thread usually has
5339 //    |                        |/  1 glibc guard page.
5340 // P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
5341 //    |                        |\
5342 //    |  HotSpot Guard Pages   | - red, yellow and reserved pages
5343 //    |                        |/
5344 //    +------------------------+ StackOverflow::stack_reserved_zone_base()
5345 //    |                        |\
5346 //    |      Normal Stack      | -
5347 //    |                        |/
5348 // P2 +------------------------+ Thread::stack_base()
5349 //
5350 // Non-Java thread:
5351 //
5352 //   Low memory addresses
5353 // P0 +------------------------+
5354 //    |                        |\
5355 //    |  glibc guard page      | - usually 1 page
5356 //    |                        |/
5357 // P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
5358 //    |                        |\
5359 //    |      Normal Stack      | -
5360 //    |                        |/
5361 // P2 +------------------------+ Thread::stack_base()
5362 //
5363 // ** P1 (aka bottom) and size (P2 = P1 - size) are the address and stack size
5364 //    returned from pthread_attr_getstack().
5365 // ** Due to an NPTL implementation error, Linux takes the glibc guard page out
5366 //    of the stack size given in pthread_attr. We work around this for
5367 //    threads created by the VM. (We adapt bottom to be P1 and size accordingly.)
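     // ** For example (hypothetical values): if pthread_attr_getstack() reports
     //    bottom = 0x7f0000000000 and size = 1M while the guard size is 4K,
     //    current_stack_region() below adjusts this to bottom = 0x7f0000001000
     //    and size = 1M - 4K, i.e. the usable stack [P1, P2) without the guard.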
5368 //
5369 #ifndef ZERO
5370 static void current_stack_region(address * bottom, size_t * size) {
5371   if (os::is_primordial_thread()) {
5372     // primordial thread needs special handling because pthread_getattr_np()
5373     // may return a bogus value.
5374     *bottom = os::Linux::initial_thread_stack_bottom();
5375     *size   = os::Linux::initial_thread_stack_size();
5376   } else {
5377     pthread_attr_t attr;
5378 
5379     int rslt = pthread_getattr_np(pthread_self(), &attr);
5380 
5381     // JVM needs to know exact stack location, abort if it fails
5382     if (rslt != 0) {
5383       if (rslt == ENOMEM) {
5384         vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
5385       } else {
5386         fatal("pthread_getattr_np failed with error = %d", rslt);
5387       }
5388     }
5389 
5390     if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) {
5391       fatal("Cannot locate current stack attributes!");
5392     }
5393 
5394     // Work around NPTL stack guard error.
5395     size_t guard_size = 0;
5396     rslt = pthread_attr_getguardsize(&attr, &guard_size);
5397     if (rslt != 0) {
5398       fatal("pthread_attr_getguardsize failed with error = %d", rslt);
5399     }
5400     *bottom += guard_size;
5401     *size   -= guard_size;
5402 
5403     pthread_attr_destroy(&attr);
5404 
5405   }
5406   assert(os::current_stack_pointer() >= *bottom &&
5407          os::current_stack_pointer() < *bottom + *size, "just checking");
5408 }
5409 
5410 address os::current_stack_base() {
5411   address bottom;
5412   size_t size;
5413   current_stack_region(&bottom, &size);
5414   return (bottom + size);
5415 }
5416 
5417 size_t os::current_stack_size() {
5418   // This stack size includes the usable stack and HotSpot guard pages
5419   // (for the threads that have HotSpot guard pages).
5420   address bottom;
5421   size_t size;
5422   current_stack_region(&bottom, &size);
5423   return size;
5424 }
5425 #endif
5426 
5427 static inline struct timespec get_mtime(const char* filename) {
5428   struct stat st;
5429   int ret = os::stat(filename, &st);
5430   assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
5431   return st.st_mtim;
5432 }
5433 
5434 int os::compare_file_modified_times(const char* file1, const char* file2) {
5435   struct timespec filetime1 = get_mtime(file1);
5436   struct timespec filetime2 = get_mtime(file2);
5437   int diff = filetime1.tv_sec - filetime2.tv_sec;
5438   if (diff == 0) {
5439     return filetime1.tv_nsec - filetime2.tv_nsec;
5440   }
5441   return diff;
5442 }
5443 
5444 bool os::supports_map_sync() {
5445   return true;
5446 }
5447 
5448 void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {
5449   // Note: all ranges are "[..)"
5450   unsigned long long start = (unsigned long long)addr;
5451   unsigned long long end = start + bytes;
5452   FILE* f = ::fopen("/proc/self/maps", "r");
5453   int num_found = 0;
5454   if (f != NULL) {
5455     st->print_cr("Range [%llx-%llx) contains: ", start, end);
5456     char line[512];
5457     while (fgets(line, sizeof(line), f) == line) {
5458       unsigned long long segment_start = 0;
5459       unsigned long long segment_end = 0;
5460       if (::sscanf(line, "%llx-%llx", &segment_start, &segment_end) == 2) {
5461         // Let's print out every range that touches ours.
5462         if (segment_start < end && segment_end > start) {
5463           num_found++;
5464           st->print("%s", line); // line includes \n
5465         }
5466       }
5467     }
5468     ::fclose(f);
5469     if (num_found == 0) {
5470       st->print_cr("nothing.");
5471     }
5472     st->cr();
5473   }
5474 }
--- EOF ---