/*
 * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2022 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "jvm.h"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvmtifiles/jvmti.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_linux.inline.hpp"
#include "os_posix.inline.hpp"
#include "os_share_linux.hpp"
#include "osContainer_linux.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "signals_posix.hpp"
#include "semaphore_posix.hpp"
#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
#include "utilities/align.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/elfFile.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here
# include <sys/types.h>
# include <sys/mman.h>
# include <sys/stat.h>
# include <sys/select.h>
# include <pthread.h>
# include <signal.h>
# include <endian.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>
# include <pthread.h>
# include <sys/stat.h>
# include <sys/time.h>
# include <sys/times.h>
# include <sys/utsname.h>
# include <sys/socket.h>
# include <pwd.h>
# include <poll.h>
# include <fcntl.h>
# include <string.h>
# include <syscall.h>
# include <sys/sysinfo.h>
# include <sys/ipc.h>
# include <sys/shm.h>
# include <link.h>
# include <stdint.h>
# include <inttypes.h>
# include <sys/ioctl.h>
# include <linux/elf-em.h>
#ifdef __GLIBC__
# include <malloc.h>
#endif

#ifndef _GNU_SOURCE
  #define _GNU_SOURCE
  #include <sched.h>
  #undef _GNU_SOURCE
#else
  #include <sched.h>
#endif

// if RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
// getrusage() is prepared to handle the associated failure.
#ifndef RUSAGE_THREAD
  #define RUSAGE_THREAD   (1)               /* only the calling thread */
#endif

#define MAX_PATH    (2 * K)

#define MAX_SECS 100000000

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)

#ifdef MUSL_LIBC
// dlvsym is not a part of POSIX
// and musl libc doesn't implement it.
static void *dlvsym(void *handle,
                    const char *symbol,
                    const char *version) {
  // load the latest version of symbol
  return dlsym(handle, symbol);
}
#endif

enum CoredumpFilterBit {
  FILE_BACKED_PVT_BIT = 1 << 2,
  FILE_BACKED_SHARED_BIT = 1 << 3,
  LARGEPAGES_BIT = 1 << 6,
  DAX_SHARED_BIT = 1 << 8
};

////////////////////////////////////////////////////////////////////////////////
// global variables
julong os::Linux::_physical_memory = 0;

address   os::Linux::_initial_thread_stack_bottom = NULL;
uintptr_t os::Linux::_initial_thread_stack_size   = 0;

int (*os::Linux::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
int (*os::Linux::_pthread_setname_np)(pthread_t, const char*) = NULL;
pthread_t os::Linux::_main_thread;
int os::Linux::_page_size = -1;
bool os::Linux::_supports_fast_thread_cpu_time = false;
const char * os::Linux::_libc_version = NULL;
const char * os::Linux::_libpthread_version = NULL;
size_t os::Linux::_default_large_page_size = 0;

#ifdef __GLIBC__
os::Linux::mallinfo_func_t os::Linux::_mallinfo = NULL;
os::Linux::mallinfo2_func_t os::Linux::_mallinfo2 = NULL;
#endif // __GLIBC__

static jlong initial_time_count = 0;

static int clock_tics_per_sec = 100;

// If the VM might have been created on the primordial thread, we need to resolve the
// primordial thread stack bounds and check if the current thread might be the
// primordial thread in places. If we know that the primordial thread is never used,
// such as when the VM was created by one of the standard java launchers, we can
// avoid this.
static bool suppress_primordial_thread_resolution = false;

// utility functions

julong os::available_memory() {
  return Linux::available_memory();
}

julong os::Linux::available_memory() {
  // values in struct sysinfo are "unsigned long"
  struct sysinfo si;
  julong avail_mem;

  if (OSContainer::is_containerized()) {
    jlong mem_limit = OSContainer::memory_limit_in_bytes();
    jlong mem_usage;
    if (mem_limit > 0 && (mem_usage = OSContainer::memory_usage_in_bytes()) < 1) {
      log_debug(os, container)("container memory usage failed: " JLONG_FORMAT ", using host value", mem_usage);
    }
    if (mem_limit > 0 && mem_usage > 0) {
      avail_mem = mem_limit > mem_usage ? (julong)mem_limit - (julong)mem_usage : 0;
      log_trace(os)("available container memory: " JULONG_FORMAT, avail_mem);
      return avail_mem;
    }
  }

  sysinfo(&si);
  avail_mem = (julong)si.freeram * si.mem_unit;
  log_trace(os)("available memory: " JULONG_FORMAT, avail_mem);
  return avail_mem;
}
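// Illustrative note (not from the original sources): in a containerized JVM
// whose memory controller reports a 2 GiB limit and 1.5 GiB of current usage,
// available_memory() above returns 512 MiB without consulting sysinfo(); on
// the host, or when either container query fails, it falls back to
// si.freeram * si.mem_unit.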
julong os::physical_memory() {
  jlong phys_mem = 0;
  if (OSContainer::is_containerized()) {
    jlong mem_limit;
    if ((mem_limit = OSContainer::memory_limit_in_bytes()) > 0) {
      log_trace(os)("total container memory: " JLONG_FORMAT, mem_limit);
      return mem_limit;
    }
  }

  phys_mem = Linux::physical_memory();
  log_trace(os)("total system memory: " JLONG_FORMAT, phys_mem);
  return phys_mem;
}

static uint64_t initial_total_ticks = 0;
static uint64_t initial_steal_ticks = 0;
static bool     has_initial_tick_info = false;

static void next_line(FILE *f) {
  int c;
  do {
    c = fgetc(f);
  } while (c != '\n' && c != EOF);
}

bool os::Linux::get_tick_information(CPUPerfTicks* pticks, int which_logical_cpu) {
  FILE*         fh;
  uint64_t      userTicks, niceTicks, systemTicks, idleTicks;
  // since at least kernel 2.6 : iowait: time waiting for I/O to complete
  // irq: time servicing interrupts; softirq: time servicing softirqs
  uint64_t      iowTicks = 0, irqTicks = 0, sirqTicks = 0;
  // steal (since kernel 2.6.11): time spent in other OS when running in a virtualized environment
  uint64_t      stealTicks = 0;
  // guest (since kernel 2.6.24): time spent running a virtual CPU for guest OS under the
  // control of the Linux kernel
  uint64_t      guestNiceTicks = 0;
  int           logical_cpu = -1;
  const int     required_tickinfo_count = (which_logical_cpu == -1) ? 4 : 5;
  int           n;

  memset(pticks, 0, sizeof(CPUPerfTicks));

  if ((fh = fopen("/proc/stat", "r")) == NULL) {
    return false;
  }

  if (which_logical_cpu == -1) {
    n = fscanf(fh, "cpu " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
               UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
               UINT64_FORMAT " " UINT64_FORMAT " ",
               &userTicks, &niceTicks, &systemTicks, &idleTicks,
               &iowTicks, &irqTicks, &sirqTicks,
               &stealTicks, &guestNiceTicks);
  } else {
    // Move to next line
    next_line(fh);

    // find the line for the requested cpu (faster to just iterate linefeeds?)
    for (int i = 0; i < which_logical_cpu; i++) {
      next_line(fh);
    }

    n = fscanf(fh, "cpu%u " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
               UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
               UINT64_FORMAT " " UINT64_FORMAT " ",
               &logical_cpu, &userTicks, &niceTicks,
               &systemTicks, &idleTicks, &iowTicks, &irqTicks, &sirqTicks,
               &stealTicks, &guestNiceTicks);
  }

  fclose(fh);
  if (n < required_tickinfo_count || logical_cpu != which_logical_cpu) {
    return false;
  }
  pticks->used       = userTicks + niceTicks;
  pticks->usedKernel = systemTicks + irqTicks + sirqTicks;
  pticks->total      = userTicks + niceTicks + systemTicks + idleTicks +
                       iowTicks + irqTicks + sirqTicks + stealTicks + guestNiceTicks;

  if (n > required_tickinfo_count + 3) {
    pticks->steal = stealTicks;
    pticks->has_steal_ticks = true;
  } else {
    pticks->steal = 0;
    pticks->has_steal_ticks = false;
  }

  return true;
}
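// For reference (illustrative, not from the original sources): proc(5)
// documents the /proc/stat "cpu" lines parsed above; each value is a
// cumulative tick count in USER_HZ units:
//
//   cpu  user nice system idle iowait irq softirq steal guest guest_nice
//   cpu0 user nice system idle iowait irq softirq steal guest guest_nice
//
// get_tick_information() maps user+nice to "used", system+irq+softirq to
// "usedKernel", and the sum of all scanned fields to "total"; steal ticks
// are only reported when the kernel printed enough columns.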
// Return true if the process runs with elevated privileges, i.e. the real
// and effective uid or gid differ (as for setuid/setgid executables). Note
// that this does not check for uid 0 itself.

bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}


#ifndef SYS_gettid
// i386: 224, ia64: 1105, amd64: 186, sparc: 143
  #ifdef __ia64__
    #define SYS_gettid 1105
  #else
    #ifdef __i386__
      #define SYS_gettid 224
    #else
      #ifdef __amd64__
        #define SYS_gettid 186
      #else
        #ifdef __sparc__
          #define SYS_gettid 143
        #else
          #error define gettid for the arch
        #endif
      #endif
    #endif
  #endif
#endif


// pid_t gettid()
//
// Returns the kernel thread id of the currently running thread. Kernel
// thread id is used to access /proc.
pid_t os::Linux::gettid() {
  int rslt = syscall(SYS_gettid);
  assert(rslt != -1, "must be."); // old linuxthreads implementation?
  return (pid_t)rslt;
}

// Returns the amount of swap currently configured, in bytes.
// This can change at any time.
julong os::Linux::host_swap() {
  struct sysinfo si;
  sysinfo(&si);
  return (julong)si.totalswap;
}

// Most versions of linux have a bug where the number of processors is
// determined by looking at the /proc file system. In a chroot environment,
// the system call returns 1.
static bool unsafe_chroot_detected = false;
static const char *unstable_chroot_error = "/proc file system not found.\n"
                     "Java may be unstable running multithreaded in a chroot "
                     "environment on Linux when /proc filesystem is not mounted.";

void os::Linux::initialize_system_info() {
  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
  if (processor_count() == 1) {
    pid_t pid = os::Linux::gettid();
    char fname[32];
    jio_snprintf(fname, sizeof(fname), "/proc/%d", pid);
    FILE *fp = fopen(fname, "r");
    if (fp == NULL) {
      unsafe_chroot_detected = true;
    } else {
      fclose(fp);
    }
  }
  _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
  assert(processor_count() > 0, "linux error");
}

void os::init_system_properties_values() {
  // The next steps are taken in the product version:
  //
  // Obtain the JAVA_HOME value from the location of libjvm.so.
  // This library should be located at:
  // <JAVA_HOME>/lib/{client|server}/libjvm.so.
  //
  // If "/jre/lib/" appears at the right place in the path, then we
  // assume libjvm.so is installed in a JDK and we use this path.
  //
  // Otherwise exit with message: "Could not create the Java virtual machine."
  //
  // The following extra steps are taken in the debugging version:
  //
  // If "/jre/lib/" does NOT appear at the right place in the path
  // instead of exit check for $JAVA_HOME environment variable.
  //
  // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
  // then we append a fake suffix "hotspot/libjvm.so" to this path so
  // it looks like libjvm.so is installed there
  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
  //
  // Otherwise exit.
  //
  // Important note: if the location of libjvm.so changes this
  // code needs to be changed accordingly.

  // See ld(1):
  //      The linker uses the following search paths to locate required
  //      shared libraries:
  //      1: ...
  //      ...
  //      7: The default directories, normally /lib and /usr/lib.
#ifndef OVERRIDE_LIBPATH
  #if defined(_LP64)
    #define DEFAULT_LIBPATH "/usr/lib64:/lib64:/lib:/usr/lib"
  #else
    #define DEFAULT_LIBPATH "/lib:/usr/lib"
  #endif
#else
  #define DEFAULT_LIBPATH OVERRIDE_LIBPATH
#endif

// Base path of extensions installed on the system.
#define SYS_EXT_DIR     "/usr/java/packages"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the colon and the trailing null are provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /libjvm.so.
    }
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /lib.
      }
    }
    Arguments::set_java_home(buf);
    if (!set_boot_path('/', ':')) {
      vm_exit_during_initialization("Failed setting boot class path.", NULL);
    }
  }

  // Where to look for native libraries.
  //
  // Note: Due to a legacy implementation, most of the library path
  // is set in the launcher. This was to accommodate linking restrictions
  // on legacy Linux implementations (which are no longer supported).
  // Eventually, all the library path setting will be done here.
  //
  // However, to prevent the proliferation of improperly built native
  // libraries, the new path component /usr/java/packages is added here.
  {
    // Get the user setting of LD_LIBRARY_PATH, and prepend it. It
    // should always exist (until the legacy problem cited above is
    // addressed).
    const char *v = ::getenv("LD_LIBRARY_PATH");
    const char *v_colon = ":";
    if (v == NULL) { v = ""; v_colon = ""; }
    // That's +1 for the colon and +1 for the trailing '\0'.
    char *ld_library_path = NEW_C_HEAP_ARRAY(char,
                                             strlen(v) + 1 +
                                             sizeof(SYS_EXT_DIR) + sizeof("/lib/") + sizeof(DEFAULT_LIBPATH) + 1,
                                             mtInternal);
    sprintf(ld_library_path, "%s%s" SYS_EXT_DIR "/lib:" DEFAULT_LIBPATH, v, v_colon);
    Arguments::set_library_path(ld_library_path);
    FREE_C_HEAP_ARRAY(char, ld_library_path);
  }

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef SYS_EXT_DIR
#undef EXTENSIONS_DIR
}
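// Example with hypothetical values (not from the original sources): with
// LD_LIBRARY_PATH=/opt/foo on a 64-bit build, the library path assembled
// above becomes
//   /opt/foo:/usr/java/packages/lib:/usr/lib64:/lib64:/lib:/usr/lib
// and the extension directories become
//   <java_home>/lib/ext:/usr/java/packages/lib/ext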
////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

//////////////////////////////////////////////////////////////////////////////
// detecting pthread library

void os::Linux::libpthread_init() {
  // Save glibc and pthread version strings.
#if !defined(_CS_GNU_LIBC_VERSION) || \
    !defined(_CS_GNU_LIBPTHREAD_VERSION)
  #error "glibc too old (< 2.3.2)"
#endif

#ifdef MUSL_LIBC
  // confstr() from musl libc returns EINVAL for
  // _CS_GNU_LIBC_VERSION and _CS_GNU_LIBPTHREAD_VERSION
  os::Linux::set_libc_version("musl - unknown");
  os::Linux::set_libpthread_version("musl - unknown");
#else
  size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0);
  assert(n > 0, "cannot retrieve glibc version");
  char *str = (char *)malloc(n, mtInternal);
  confstr(_CS_GNU_LIBC_VERSION, str, n);
  os::Linux::set_libc_version(str);

  n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
  assert(n > 0, "cannot retrieve pthread version");
  str = (char *)malloc(n, mtInternal);
  confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
  os::Linux::set_libpthread_version(str);
#endif
}

/////////////////////////////////////////////////////////////////////////////
// thread stack expansion

// os::Linux::manually_expand_stack() takes care of expanding the thread
// stack. Note that this is normally not needed: pthread stacks are allocated
// with mmap() without MAP_NORESERVE, so the stack is already committed.
// Therefore it is not necessary to expand the stack manually.
//
// Manually expanding the stack was historically needed on LinuxThreads
// thread stacks, which were allocated with mmap(MAP_GROWSDOWN). Nowadays
// it is kept to deal with very rare corner cases:
//
// For one, the user may run the VM on their own implementation of threads
// whose stacks are - like the old LinuxThreads - implemented using
// mmap(MAP_GROWSDOWN).
//
// Also, this code may be needed if the VM is running on the primordial
// thread. Normally we avoid running on the primordial thread; however,
// the user may still invoke the VM on the primordial thread.
//
// The following historical comment describes the details about running
// on a thread stack allocated with mmap(MAP_GROWSDOWN):


// Force Linux kernel to expand current thread stack. If "bottom" is close
// to the stack guard, caller should block all signals.
//
// MAP_GROWSDOWN:
//   A special mmap() flag that is used to implement thread stacks. It tells
//   kernel that the memory region should extend downwards when needed. This
//   allows early versions of LinuxThreads to only mmap the first few pages
//   when creating a new thread. Linux kernel will automatically expand thread
//   stack as needed (on page faults).
//
//   However, because the memory region of a MAP_GROWSDOWN stack can grow on
//   demand, if a page fault happens outside an already mapped MAP_GROWSDOWN
//   region, it's hard to tell if the fault is due to a legitimate stack
//   access or because of reading/writing non-existent memory (e.g. buffer
//   overrun). As a rule, if the fault happens below current stack pointer,
//   Linux kernel does not expand stack, instead a SIGSEGV is sent to the
//   application (see Linux kernel fault.c).
//
// This Linux feature can cause SIGSEGV when VM bangs thread stack for
// stack overflow detection.
//
// Newer versions of LinuxThreads (since glibc-2.2, or RH-7.x) and NPTL do
// not use MAP_GROWSDOWN.
//
// To get around the problem and allow stack banging on Linux, we need to
// manually expand thread stack after receiving the SIGSEGV.
//
// There are two ways to expand thread stack to address "bottom", we used
// both of them in JVM before 1.5:
//   1. adjust stack pointer first so that it is below "bottom", and then
//      touch "bottom"
//   2. mmap() the page in question
//
// Now that the alternate signal stack is gone, it's harder to use 2. For
// instance, if current sp is already near the lower end of page 101, and we
// need to call mmap() to map page 100, it is possible that part of the mmap()
// frame will be placed in page 100. When page 100 is mapped, it is zero-filled.
// That will destroy the mmap() frame and cause VM to crash.
//
// The following code works by adjusting sp first, then accessing the "bottom"
// page to force a page fault. Linux kernel will then automatically expand the
// stack mapping.
//
// _expand_stack_to() assumes its frame size is less than page size, which
// should always be true if the function is not inlined.

static void NOINLINE _expand_stack_to(address bottom) {
  address sp;
  size_t size;
  volatile char *p;

  // Adjust bottom to point to the largest address within the same page, it
  // gives us a one-page buffer if alloca() allocates slightly more memory.
  bottom = (address)align_down((uintptr_t)bottom, os::Linux::page_size());
  bottom += os::Linux::page_size() - 1;

  // sp might be slightly above current stack pointer; if that's the case, we
  // will alloca() a little more space than necessary, which is OK. Don't use
  // os::current_stack_pointer(), as its result can be slightly below current
  // stack pointer, causing us to not alloca enough to reach "bottom".
  sp = (address)&sp;

  if (sp > bottom) {
    size = sp - bottom;
    p = (volatile char *)alloca(size);
    assert(p != NULL && p <= (volatile char *)bottom, "alloca problem?");
    p[0] = '\0';
  }
}

void os::Linux::expand_stack_to(address bottom) {
  _expand_stack_to(bottom);
}

bool os::Linux::manually_expand_stack(JavaThread * t, address addr) {
  assert(t != NULL, "just checking");
  assert(t->osthread()->expanding_stack(), "expand should be set");

  if (t->is_in_usable_stack(addr)) {
    sigset_t mask_all, old_sigset;
    sigfillset(&mask_all);
    pthread_sigmask(SIG_SETMASK, &mask_all, &old_sigset);
    _expand_stack_to(addr);
    pthread_sigmask(SIG_SETMASK, &old_sigset, NULL);
    return true;
  }
  return false;
}
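// Illustration (not from the original sources) of how _expand_stack_to()
// grows a MAP_GROWSDOWN mapping, assuming 4K pages:
//
//   high addr  +------------------+
//              |   mapped stack   |  <- sp somewhere in the lowest mapped page
//              +------------------+  <- current low end of the mapping
//              |    not mapped    |  <- "bottom" (rounded up to its page's top)
//              +------------------+
//
// alloca(sp - bottom) first moves sp below "bottom"; the write through p[0]
// then faults at an address above the new sp, which the kernel treats as a
// legitimate stack access and answers by extending the mapping downwards.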
//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads
static void *thread_native_entry(Thread *thread) {

  thread->record_stack_base_and_size();

#ifndef __GLIBC__
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  // This code is not needed anymore in glibc because it has MULTI_PAGE_ALIASING
  // and we did not see any degradation in performance without `alloca()`.
  static int counter = 0;
  int pid = os::current_process_id();
  int random = ((pid ^ counter++) & 7) * 128;
  void *stackmem = alloca(random != 0 ? random : 1); // ensure we allocate > 0
  // Ensure the alloca result is used in a way that prevents the compiler from eliding it.
  *(char *)stackmem = 1;
#endif

  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();
  Monitor* sync = osthread->startThread_lock();

  osthread->set_thread_id(os::current_thread_id());

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }
  // initialize signal mask for this thread
  PosixSignals::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Linux::init_thread_fpu_state();

  // handshaking with parent thread
  {
    MutexLocker ml(sync, Mutex::_no_safepoint_check_flag);

    // notify parent thread
    osthread->set_state(INITIALIZED);
    sync->notify_all();

    // wait until os::start_thread()
    while (osthread->get_state() == INITIALIZED) {
      sync->wait_without_safepoint_check();
    }
  }

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
                       os::current_thread_id(), (uintx) pthread_self());

  assert(osthread->pthread_id() != 0, "pthread_id was not set as expected");

  // call one more level start routine
  thread->call_run();

  // Note: at this point the thread object may already have deleted itself.
  // Prevent dereferencing it from here on out.
  thread = NULL;

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
                       os::current_thread_id(), (uintx) pthread_self());

  return 0;
}

// On Linux, glibc places static TLS blocks (for __thread variables) on
// the thread stack. This decreases the stack size actually available
// to threads.
//
// For large static TLS sizes, this may cause threads to malfunction due
// to insufficient stack space. This is a well-known issue in glibc:
// http://sourceware.org/bugzilla/show_bug.cgi?id=11787.
//
// As a workaround, we call a private but assumed-stable glibc function,
// __pthread_get_minstack() to obtain the minstack size and derive the
// static TLS size from it. We then increase the user requested stack
// size by this TLS size.
//
// Due to compatibility concerns, this size adjustment is opt-in and
// controlled via AdjustStackSizeForTLS.
typedef size_t (*GetMinStack)(const pthread_attr_t *attr);

GetMinStack _get_minstack_func = NULL;

static void get_minstack_init() {
  _get_minstack_func =
        (GetMinStack)dlsym(RTLD_DEFAULT, "__pthread_get_minstack");
  log_info(os, thread)("Lookup of __pthread_get_minstack %s",
                       _get_minstack_func == NULL ? "failed" : "succeeded");
}

// Returns the size of the static TLS area glibc puts on thread stacks.
// The value is cached on first use, which occurs when the first thread
// is created during VM initialization.
static size_t get_static_tls_area_size(const pthread_attr_t *attr) {
  size_t tls_size = 0;
  if (_get_minstack_func != NULL) {
    // Obtain the pthread minstack size by calling __pthread_get_minstack.
    size_t minstack_size = _get_minstack_func(attr);

    // Remove non-TLS area size included in minstack size returned
    // by __pthread_get_minstack() to get the static TLS size.
    // In glibc before 2.27, minstack size includes guard_size.
    // In glibc 2.27 and later, guard_size is automatically added
    // to the stack size by pthread_create and is no longer included
    // in minstack size. In both cases, the guard_size is taken into
    // account, so there is no need to adjust the result for that.
    //
    // Although __pthread_get_minstack() is a private glibc function,
    // it is expected to have a stable behavior across future glibc
    // versions while glibc still allocates the static TLS blocks off
    // the stack. Following is glibc 2.28 __pthread_get_minstack():
    //
    // size_t
    // __pthread_get_minstack (const pthread_attr_t *attr)
    // {
    //   return GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN;
    // }
    //
    //
    // The following 'minstack_size > os::vm_page_size() + PTHREAD_STACK_MIN'
    // if check is done as a precaution.
    if (minstack_size > (size_t)os::vm_page_size() + PTHREAD_STACK_MIN) {
      tls_size = minstack_size - os::vm_page_size() - PTHREAD_STACK_MIN;
    }
  }

  log_info(os, thread)("Stack size adjustment for TLS is " SIZE_FORMAT,
                       tls_size);
  return tls_size;
}
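// Worked example with made-up numbers (not from the original sources): if
// __pthread_get_minstack() returns 80K on a system with a 4K page size and
// PTHREAD_STACK_MIN of 16K, then tls_size = 80K - 4K - 16K = 60K, and
// create_thread() below enlarges the requested stack by 60K when
// AdjustStackSizeForTLS is enabled.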
bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t req_stack_size) {
  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  // Calculate stack size if it's not specified by caller.
  size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
  // In glibc versions prior to 2.7 the guard size mechanism
  // is not implemented properly. The POSIX standard requires adding
  // the size of the guard pages to the stack size, whereas Linux
  // takes the space out of 'stacksize'. Thus we adapt the requested
  // stack_size by the size of the guard pages to mimic proper
  // behaviour. However, be careful not to end up with a size
  // of zero due to overflow. Don't add the guard page in that case.
  size_t guard_size = os::Linux::default_guard_size(thr_type);
  // Configure glibc guard page. Must happen before calling
  // get_static_tls_area_size(), which uses the guard_size.
  pthread_attr_setguardsize(&attr, guard_size);

  size_t stack_adjust_size = 0;
  if (AdjustStackSizeForTLS) {
    // Adjust the stack_size for on-stack TLS - see get_static_tls_area_size().
    stack_adjust_size += get_static_tls_area_size(&attr);
  } else {
    stack_adjust_size += guard_size;
  }

  stack_adjust_size = align_up(stack_adjust_size, os::vm_page_size());
  if (stack_size <= SIZE_MAX - stack_adjust_size) {
    stack_size += stack_adjust_size;
  }
  assert(is_aligned(stack_size, os::vm_page_size()), "stack_size not aligned");

  int status = pthread_attr_setstacksize(&attr, stack_size);
  if (status != 0) {
    // pthread_attr_setstacksize() function can fail
    // if the stack size exceeds a system-imposed limit.
    assert_status(status == EINVAL, status, "pthread_attr_setstacksize");
    log_warning(os, thread)("The %sthread stack size specified is invalid: " SIZE_FORMAT "k",
                            (thr_type == compiler_thread) ? "compiler " : ((thr_type == java_thread) ? "" : "VM "),
                            stack_size / K);
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  ThreadState state;

  {
    ResourceMark rm;
    pthread_t tid;
    int ret = 0;
    int limit = 3;
    do {
      ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);
    } while (ret == EAGAIN && limit-- > 0);

    char buf[64];
    if (ret == 0) {
      log_info(os, thread)("Thread \"%s\" started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
", 881 thread->name(), (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr)); 882 } else { 883 log_warning(os, thread)("Failed to start thread \"%s\" - pthread_create failed (%s) for attributes: %s.", 884 thread->name(), os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr)); 885 // Log some OS information which might explain why creating the thread failed. 886 log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads()); 887 LogStream st(Log(os, thread)::info()); 888 os::Posix::print_rlimit_info(&st); 889 os::print_memory_info(&st); 890 os::Linux::print_proc_sys_info(&st); 891 os::Linux::print_container_info(&st); 892 } 893 894 pthread_attr_destroy(&attr); 895 896 if (ret != 0) { 897 // Need to clean up stuff we've allocated so far 898 thread->set_osthread(NULL); 899 delete osthread; 900 return false; 901 } 902 903 // Store pthread info into the OSThread 904 osthread->set_pthread_id(tid); 905 906 // Wait until child thread is either initialized or aborted 907 { 908 Monitor* sync_with_child = osthread->startThread_lock(); 909 MutexLocker ml(sync_with_child, Mutex::_no_safepoint_check_flag); 910 while ((state = osthread->get_state()) == ALLOCATED) { 911 sync_with_child->wait_without_safepoint_check(); 912 } 913 } 914 } 915 916 // The thread is returned suspended (in state INITIALIZED), 917 // and is started higher up in the call chain 918 assert(state == INITIALIZED, "race condition"); 919 return true; 920 } 921 922 ///////////////////////////////////////////////////////////////////////////// 923 // attach existing thread 924 925 // bootstrap the main thread 926 bool os::create_main_thread(JavaThread* thread) { 927 assert(os::Linux::_main_thread == pthread_self(), "should be called inside main thread"); 928 return create_attached_thread(thread); 929 } 930 931 bool os::create_attached_thread(JavaThread* thread) { 932 #ifdef ASSERT 933 thread->verify_not_published(); 934 #endif 935 936 // Allocate the OSThread object 937 OSThread* osthread = new OSThread(NULL, NULL); 938 939 if (osthread == NULL) { 940 return false; 941 } 942 943 // Store pthread info into the OSThread 944 osthread->set_thread_id(os::Linux::gettid()); 945 osthread->set_pthread_id(::pthread_self()); 946 947 // initialize floating point control register 948 os::Linux::init_thread_fpu_state(); 949 950 // Initial thread state is RUNNABLE 951 osthread->set_state(RUNNABLE); 952 953 thread->set_osthread(osthread); 954 955 if (UseNUMA) { 956 int lgrp_id = os::numa_get_group_id(); 957 if (lgrp_id != -1) { 958 thread->set_lgrp_id(lgrp_id); 959 } 960 } 961 962 if (os::is_primordial_thread()) { 963 // If current thread is primordial thread, its stack is mapped on demand, 964 // see notes about MAP_GROWSDOWN. Here we try to force kernel to map 965 // the entire stack region to avoid SEGV in stack banging. 966 // It is also useful to get around the heap-stack-gap problem on SuSE 967 // kernel (see 4821821 for details). We first expand stack to the top 968 // of yellow zone, then enable stack yellow zone (order is significant, 969 // enabling yellow zone first will crash JVM on SuSE Linux), so there 970 // is no gap between the last two virtual memory regions. 

    StackOverflow* overflow_state = thread->stack_overflow_state();
    address addr = overflow_state->stack_reserved_zone_base();
    assert(addr != NULL, "initialization problem?");
    assert(overflow_state->stack_available(addr) > 0, "stack guard should not be enabled");

    osthread->set_expanding_stack();
    os::Linux::manually_expand_stack(thread, addr);
    osthread->clear_expanding_stack();
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  PosixSignals::hotspot_sigmask(thread);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT
                       ", stack: " PTR_FORMAT " - " PTR_FORMAT " (" SIZE_FORMAT "K) ).",
                       os::current_thread_id(), (uintx) pthread_self(),
                       p2i(thread->stack_base()), p2i(thread->stack_end()), thread->stack_size());

  return true;
}

void os::pd_start_thread(Thread* thread) {
  OSThread * osthread = thread->osthread();
  assert(osthread->get_state() != INITIALIZED, "just checking");
  Monitor* sync_with_child = osthread->startThread_lock();
  MutexLocker ml(sync_with_child, Mutex::_no_safepoint_check_flag);
  sync_with_child->notify();
}

// Free Linux resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  assert(Thread::current()->osthread() == osthread,
         "os::free_thread but not current thread");

#ifdef ASSERT
  sigset_t current;
  sigemptyset(&current);
  pthread_sigmask(SIG_SETMASK, NULL, &current);
  assert(!sigismember(&current, PosixSignals::SR_signum), "SR signal should not be blocked!");
#endif

  // Restore caller's signal mask
  sigset_t sigmask = osthread->caller_sigmask();
  pthread_sigmask(SIG_SETMASK, &sigmask, NULL);

  delete osthread;
}

//////////////////////////////////////////////////////////////////////////////
// primordial thread

// Check if current thread is the primordial thread, similar to Solaris thr_main.
bool os::is_primordial_thread(void) {
  if (suppress_primordial_thread_resolution) {
    return false;
  }
  char dummy;
  // If called before init complete, thread stack bottom will be null.
  // Can be called if fatal error occurs before initialization.
  if (os::Linux::initial_thread_stack_bottom() == NULL) return false;
  assert(os::Linux::initial_thread_stack_bottom() != NULL &&
         os::Linux::initial_thread_stack_size()   != 0,
         "os::init did not locate primordial thread's stack region");
  if ((address)&dummy >= os::Linux::initial_thread_stack_bottom() &&
      (address)&dummy < os::Linux::initial_thread_stack_bottom() +
                        os::Linux::initial_thread_stack_size()) {
    return true;
  } else {
    return false;
  }
}

// Find the virtual memory area that contains addr
static bool find_vma(address addr, address* vma_low, address* vma_high) {
  FILE *fp = fopen("/proc/self/maps", "r");
  if (fp) {
    address low, high;
    while (!feof(fp)) {
      if (fscanf(fp, "%p-%p", &low, &high) == 2) {
        if (low <= addr && addr < high) {
          if (vma_low)  *vma_low  = low;
          if (vma_high) *vma_high = high;
          fclose(fp);
          return true;
        }
      }
      for (;;) {
        int ch = fgetc(fp);
        if (ch == EOF || ch == (int)'\n') break;
      }
    }
    fclose(fp);
  }
  return false;
}

// Locate primordial thread stack. This special handling of primordial thread stack
// is needed because pthread_getattr_np() on most (all?) Linux distros returns
// bogus value for the primordial process thread. While the launcher has created
// the VM in a new thread since JDK 6, we still have to allow for the use of the
// JNI invocation API from a primordial thread.
void os::Linux::capture_initial_stack(size_t max_size) {

  // max_size is either 0 (which means accept OS default for thread stacks) or
  // a user-specified value known to be at least the minimum needed. If we
  // are actually on the primordial thread we can make it appear that we have a
  // smaller max_size stack by inserting the guard pages at that location. But we
  // cannot do anything to emulate a larger stack than what has been provided by
  // the OS or threading library. In fact if we try to use a stack greater than
  // what is set by rlimit then we will crash the hosting process.

  // Maximum stack size is the easy part, get it from RLIMIT_STACK.
  // If this is "unlimited" then it will be a huge value.
  struct rlimit rlim;
  getrlimit(RLIMIT_STACK, &rlim);
  size_t stack_size = rlim.rlim_cur;

  // 6308388: a bug in ld.so will relocate its own .data section to the
  //   lower end of primordial stack; reduce ulimit -s value a little bit
  //   so we won't install guard page on ld.so's data section.
  //   But ensure we don't underflow the stack size - allow 1 page spare
  if (stack_size >= (size_t)(3 * page_size())) {
    stack_size -= 2 * page_size();
  }

  // Try to figure out where the stack base (top) is. This is harder.
  //
  // When an application is started, glibc saves the initial stack pointer in
  // a global variable "__libc_stack_end", which is then used by system
  // libraries. __libc_stack_end should be pretty close to stack top. The
  // variable is available since the very early days. However, because it is
  // a private interface, it could disappear in the future.
  //
  // Linux kernel saves start_stack information in /proc/<pid>/stat. Similar
  // to __libc_stack_end, it is very close to stack top, but isn't the real
  // stack top. Note that /proc may not exist if VM is running as a chroot
  // program, so reading /proc/<pid>/stat could fail. Also the contents of
  // /proc/<pid>/stat could change in the future (though unlikely).
  //
  // We try __libc_stack_end first. If that doesn't work, look for
  // /proc/<pid>/stat. If neither of them works, we use current stack pointer
  // as a hint, which should work well in most cases.

  uintptr_t stack_start;

  // try __libc_stack_end first
  uintptr_t *p = (uintptr_t *)dlsym(RTLD_DEFAULT, "__libc_stack_end");
  if (p && *p) {
    stack_start = *p;
  } else {
    // see if we can get the start_stack field from /proc/self/stat
    FILE *fp;
    int pid;
    char state;
    int ppid;
    int pgrp;
    int session;
    int nr;
    int tpgrp;
    unsigned long flags;
    unsigned long minflt;
    unsigned long cminflt;
    unsigned long majflt;
    unsigned long cmajflt;
    unsigned long utime;
    unsigned long stime;
    long cutime;
    long cstime;
    long prio;
    long nice;
    long junk;
    long it_real;
    uintptr_t start;
    uintptr_t vsize;
    intptr_t rss;
    uintptr_t rsslim;
    uintptr_t scodes;
    uintptr_t ecode;
    int i;

    // Figure out what the primordial thread stack base is. Code is inspired
    // by email from Hans Boehm. /proc/self/stat begins with current pid,
    // followed by command name surrounded by parentheses, state, etc.
    char stat[2048];
    int statlen;

    fp = fopen("/proc/self/stat", "r");
    if (fp) {
      statlen = fread(stat, 1, 2047, fp);
      stat[statlen] = '\0';
      fclose(fp);

      // Skip pid and the command string. Note that we could be dealing with
      // weird command names, e.g. user could decide to rename java launcher
      // to "java 1.4.2 :)", then the stat file would look like
      //                1234 (java 1.4.2 :)) R ... ...
      // We don't really need to know the command string, just find the last
      // occurrence of ")" and then start parsing from there. See bug 4726580.
      char * s = strrchr(stat, ')');

      i = 0;
      if (s) {
        // Skip blank chars
        do { s++; } while (s && isspace(*s));

#define _UFM UINTX_FORMAT
#define _DFM INTX_FORMAT

        //                                     1   1   1   1   1   1   1   1   1   1   2   2    2    2    2    2    2    2    2
        //              3  4  5  6  7  8   9   0   1   2   3   4   5   6   7   8   9   0   1    2    3    4    5    6    7    8
        i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld " _UFM _UFM _DFM _UFM _UFM _UFM _UFM,
                   &state,          // 3  %c
                   &ppid,           // 4  %d
                   &pgrp,           // 5  %d
                   &session,        // 6  %d
                   &nr,             // 7  %d
                   &tpgrp,          // 8  %d
                   &flags,          // 9  %lu
                   &minflt,         // 10 %lu
                   &cminflt,        // 11 %lu
                   &majflt,         // 12 %lu
                   &cmajflt,        // 13 %lu
                   &utime,          // 14 %lu
                   &stime,          // 15 %lu
                   &cutime,         // 16 %ld
                   &cstime,         // 17 %ld
                   &prio,           // 18 %ld
                   &nice,           // 19 %ld
                   &junk,           // 20 %ld
                   &it_real,        // 21 %ld
                   &start,          // 22 UINTX_FORMAT
                   &vsize,          // 23 UINTX_FORMAT
                   &rss,            // 24 INTX_FORMAT
                   &rsslim,         // 25 UINTX_FORMAT
                   &scodes,         // 26 UINTX_FORMAT
                   &ecode,          // 27 UINTX_FORMAT
                   &stack_start);   // 28 UINTX_FORMAT
      }

#undef _UFM
#undef _DFM

      if (i != 28 - 2) {
        assert(false, "Bad conversion from /proc/self/stat");
        // product mode - assume we are the primordial thread, good luck in the
        // embedded case.
        warning("Can't detect primordial thread stack location - bad conversion");
        stack_start = (uintptr_t) &rlim;
      }
    } else {
      // For some reason we can't open /proc/self/stat (for example, running on
      // FreeBSD with a Linux emulator, or inside chroot), this should work for
      // most cases, so don't abort:
      warning("Can't detect primordial thread stack location - no /proc/self/stat");
      stack_start = (uintptr_t) &rlim;
    }
  }

  // Now we have a pointer (stack_start) very close to the stack top, the
  // next thing to do is to figure out the exact location of stack top. We
  // can find out the virtual memory area that contains stack_start by
  // reading /proc/self/maps, it should be the last vma in /proc/self/maps,
  // and its upper limit is the real stack top. (again, this would fail if
  // running inside chroot, because /proc may not exist.)

  uintptr_t stack_top;
  address low, high;
  if (find_vma((address)stack_start, &low, &high)) {
    // success, "high" is the true stack top. (ignore "low", because initial
    // thread stack grows on demand, its real bottom is high - RLIMIT_STACK.)
    stack_top = (uintptr_t)high;
  } else {
    // failed, likely because /proc/self/maps does not exist
    warning("Can't detect primordial thread stack location - find_vma failed");
    // best effort: stack_start is normally within a few pages below the real
    // stack top, use it as stack top, and reduce stack size so we won't put
    // guard page outside stack.
    stack_top = stack_start;
    stack_size -= 16 * page_size();
  }

  // stack_top could be partially down the page so align it
  stack_top = align_up(stack_top, page_size());

  // Allowed stack value is minimum of max_size and what we derived from rlimit
  if (max_size > 0) {
    _initial_thread_stack_size = MIN2(max_size, stack_size);
  } else {
    // Accept the rlimit max, but if stack is unlimited then it will be huge, so
    // clamp it at 8MB as we do on Solaris
    _initial_thread_stack_size = MIN2(stack_size, 8*M);
  }
  _initial_thread_stack_size = align_down(_initial_thread_stack_size, page_size());
  _initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size;

  assert(_initial_thread_stack_bottom < (address)stack_top, "overflow!");

  if (log_is_enabled(Info, os, thread)) {
    // See if we seem to be on primordial process thread
    bool primordial = uintptr_t(&rlim) > uintptr_t(_initial_thread_stack_bottom) &&
                      uintptr_t(&rlim) < stack_top;

    log_info(os, thread)("Capturing initial stack in %s thread: req. size: " SIZE_FORMAT "K, actual size: "
                         SIZE_FORMAT "K, top=" INTPTR_FORMAT ", bottom=" INTPTR_FORMAT,
                         primordial ? "primordial" : "user", max_size / K,  _initial_thread_stack_size / K,
                         stack_top, intptr_t(_initial_thread_stack_bottom));
  }
}
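// Example (illustrative, not from the original sources): with
// "ulimit -s unlimited" and max_size == 0, rlim_cur is effectively infinite,
// so the code above clamps the captured primordial stack size to 8M. With
// "ulimit -s 4096" (4M) it becomes 4M minus the two-page ld.so allowance,
// aligned down to a page boundary.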
////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
double os::elapsedTime() {
  return ((double)os::elapsed_counter()) / os::elapsed_frequency(); // nanosecond resolution
}

jlong os::elapsed_counter() {
  return javaTimeNanos() - initial_time_count;
}

jlong os::elapsed_frequency() {
  return NANOSECS_PER_SEC; // nanosecond resolution
}

bool os::supports_vtime() { return true; }

double os::elapsedVTime() {
  struct rusage usage;
  int retval = getrusage(RUSAGE_THREAD, &usage);
  if (retval == 0) {
    return (double) (usage.ru_utime.tv_sec + usage.ru_stime.tv_sec) + (double) (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000 * 1000);
  } else {
    // better than nothing, but not much
    return elapsedTime();
  }
}

void os::Linux::fast_thread_clock_init() {
  if (!UseLinuxPosixThreadCPUClocks) {
    return;
  }
  clockid_t clockid;
  struct timespec tp;
  int (*pthread_getcpuclockid_func)(pthread_t, clockid_t *) =
      (int(*)(pthread_t, clockid_t *)) dlsym(RTLD_DEFAULT, "pthread_getcpuclockid");

  // Switch to using fast clocks for thread cpu time if
  // clock_getres() returns a 0 error code.
  // Note that some kernels may support the current thread
  // clock (CLOCK_THREAD_CPUTIME_ID) but not the clocks
  // returned by the pthread_getcpuclockid().
  // If the fast POSIX clocks are supported then the clock_getres()
  // must return at least tp.tv_sec == 0 which means a resolution
  // better than 1 sec. This is an extra check for reliability.

  if (pthread_getcpuclockid_func &&
      pthread_getcpuclockid_func(_main_thread, &clockid) == 0 &&
      clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) {
    _supports_fast_thread_cpu_time = true;
    _pthread_getcpuclockid = pthread_getcpuclockid_func;
  }
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    *process_real_time = ((double) real_ticks) / ticks_per_second;

    return true;
  }
}


char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

// thread_id is kernel thread id (similar to Solaris LWP id)
intx os::current_thread_id() { return os::Linux::gettid(); }
int os::current_process_id() {
  return ::getpid();
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

// check if addr is inside libjvm.so
bool os::address_is_in_vm(address addr) {
  static address libjvm_base_addr;
  Dl_info dlinfo;

  if (libjvm_base_addr == NULL) {
    if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
      libjvm_base_addr = (address)dlinfo.dli_fbase;
    }
    assert(libjvm_base_addr != NULL, "Cannot obtain base address for libjvm");
  }

  if (dladdr((void *)addr, &dlinfo) != 0) {
    if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
  }

  return false;
}

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  Dl_info dlinfo;

  if (dladdr((void*)addr, &dlinfo) != 0) {
    // see if we have a matching symbol
    if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
      if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
        jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
      }
      if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
      return true;
    }
    // no matching symbol so try for just file info
    if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
      if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
                          buf, buflen, offset, dlinfo.dli_fname, demangle)) {
        return true;
      }
    }
  }

  buf[0] = '\0';
  if (offset != NULL) *offset = -1;
  return false;
}

struct _address_to_library_name {
  address addr;          // input : memory address
  size_t  buflen;        //         size of fname
  char*   fname;         // output: library name
  address base;          //         library base addr
};

static int address_to_library_name_callback(struct dl_phdr_info *info,
                                            size_t size, void *data) {
  int i;
  bool found = false;
  address libbase = NULL;
  struct _address_to_library_name * d = (struct _address_to_library_name *)data;

  // iterate through all loadable segments
  for (i = 0; i < info->dlpi_phnum; i++) {
    address segbase = (address)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
    if (info->dlpi_phdr[i].p_type == PT_LOAD) {
      // base address of a library is the lowest address of its loaded
      // segments.
      if (libbase == NULL || libbase > segbase) {
        libbase = segbase;
      }
      // see if 'addr' is within current segment
      if (segbase <= d->addr &&
          d->addr < segbase + info->dlpi_phdr[i].p_memsz) {
        found = true;
      }
    }
  }

  // dlpi_name is NULL or empty if the ELF file is executable, return 0
  // so dll_address_to_library_name() can fall through to use dladdr() which
  // can figure out executable name from argv[0].
  if (found && info->dlpi_name && info->dlpi_name[0]) {
    d->base = libbase;
    if (d->fname) {
      jio_snprintf(d->fname, d->buflen, "%s", info->dlpi_name);
    }
    return 1;
  }
  return 0;
}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  Dl_info dlinfo;
  struct _address_to_library_name data;

  // There is a bug in the old glibc dladdr() implementation where it could
  // resolve to the wrong library name if the .so file has a base address
  // != NULL. Here we iterate through the program headers of all loaded
  // libraries to find out which library 'addr' really belongs to. This
  // workaround can be removed once the minimum requirement for glibc is
  // moved to 2.3.x.
  data.addr = addr;
  data.fname = buf;
  data.buflen = buflen;
  data.base = NULL;
  int rslt = dl_iterate_phdr(address_to_library_name_callback, (void *)&data);

  if (rslt) {
    // buf already contains library name
    if (offset) *offset = addr - data.base;
    return true;
  }
  if (dladdr((void*)addr, &dlinfo) != 0) {
    if (dlinfo.dli_fname != NULL) {
      jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
    }
    if (dlinfo.dli_fbase != NULL && offset != NULL) {
      *offset = addr - (address)dlinfo.dli_fbase;
    }
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

// Loads .dll/.so and in case of error it checks whether the .dll/.so was
// built for the same architecture as HotSpot is running on.


// Remember the stack's state. The Linux dynamic linker will change
// the stack to 'executable' at most once, so we must safepoint only once.
bool os::Linux::_stack_is_executable = false;

// VM operation that loads a library. This is necessary if stack protection
// of the Java stacks can be lost during loading the library. If we
// do not stop the Java threads, they can stack overflow before the stacks
// are protected again.
class VM_LinuxDllLoad: public VM_Operation {
 private:
  const char *_filename;
  char *_ebuf;
  int _ebuflen;
  void *_lib;
 public:
  VM_LinuxDllLoad(const char *fn, char *ebuf, int ebuflen) :
    _filename(fn), _ebuf(ebuf), _ebuflen(ebuflen), _lib(NULL) {}
  VMOp_Type type() const { return VMOp_LinuxDllLoad; }
  void doit() {
    _lib = os::Linux::dll_load_in_vmthread(_filename, _ebuf, _ebuflen);
    os::Linux::_stack_is_executable = true;
  }
  void* loaded_library() { return _lib; }
};

void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
  void * result = NULL;
  bool load_attempted = false;

  log_info(os)("attempting shared library load of %s", filename);

  // Check whether the library to load might change execution rights
  // of the stack. If they are changed, the protection of the stack
  // guard pages will be lost. We need a safepoint to fix this.
  //
  // See Linux man page execstack(8) for more info.
  if (os::uses_stack_guard_pages() && !os::Linux::_stack_is_executable) {
    if (!ElfFile::specifies_noexecstack(filename)) {
      if (!is_init_completed()) {
        os::Linux::_stack_is_executable = true;
        // This is OK - No Java threads have been created yet, and hence no
        // stack guard pages to fix.
1579 // 1580 // Dynamic loader will make all stacks executable after 1581 // this function returns, and will not do that again. 1582 assert(Threads::number_of_threads() == 0, "no Java threads should exist yet."); 1583 } else { 1584 warning("You have loaded library %s which might have disabled stack guard. " 1585 "The VM will try to fix the stack guard now.\n" 1586 "It's highly recommended that you fix the library with " 1587 "'execstack -c <libfile>', or link it with '-z noexecstack'.", 1588 filename); 1589 1590 JavaThread *jt = JavaThread::current(); 1591 if (jt->thread_state() != _thread_in_native) { 1592 // This happens when a compiler thread tries to load a hsdis-<arch>.so file 1593 // that requires ExecStack. Cannot enter safe point. Let's give up. 1594 warning("Unable to fix stack guard. Giving up."); 1595 } else { 1596 if (!LoadExecStackDllInVMThread) { 1597 // This is for the case where the DLL has an static 1598 // constructor function that executes JNI code. We cannot 1599 // load such DLLs in the VMThread. 1600 result = os::Linux::dlopen_helper(filename, ebuf, ebuflen); 1601 } 1602 1603 ThreadInVMfromNative tiv(jt); 1604 debug_only(VMNativeEntryWrapper vew;) 1605 1606 VM_LinuxDllLoad op(filename, ebuf, ebuflen); 1607 VMThread::execute(&op); 1608 if (LoadExecStackDllInVMThread) { 1609 result = op.loaded_library(); 1610 } 1611 load_attempted = true; 1612 } 1613 } 1614 } 1615 } 1616 1617 if (!load_attempted) { 1618 result = os::Linux::dlopen_helper(filename, ebuf, ebuflen); 1619 } 1620 1621 if (result != NULL) { 1622 // Successful loading 1623 return result; 1624 } 1625 1626 Elf32_Ehdr elf_head; 1627 int diag_msg_max_length=ebuflen-strlen(ebuf); 1628 char* diag_msg_buf=ebuf+strlen(ebuf); 1629 1630 if (diag_msg_max_length==0) { 1631 // No more space in ebuf for additional diagnostics message 1632 return NULL; 1633 } 1634 1635 1636 int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK); 1637 1638 if (file_descriptor < 0) { 1639 // Can't open library, report dlerror() message 1640 return NULL; 1641 } 1642 1643 bool failed_to_read_elf_head= 1644 (sizeof(elf_head)!= 1645 (::read(file_descriptor, &elf_head,sizeof(elf_head)))); 1646 1647 ::close(file_descriptor); 1648 if (failed_to_read_elf_head) { 1649 // file i/o error - report dlerror() msg 1650 return NULL; 1651 } 1652 1653 if (elf_head.e_ident[EI_DATA] != LITTLE_ENDIAN_ONLY(ELFDATA2LSB) BIG_ENDIAN_ONLY(ELFDATA2MSB)) { 1654 // handle invalid/out of range endianness values 1655 if (elf_head.e_ident[EI_DATA] == 0 || elf_head.e_ident[EI_DATA] > 2) { 1656 return NULL; 1657 } 1658 1659 #if defined(VM_LITTLE_ENDIAN) 1660 // VM is LE, shared object BE 1661 elf_head.e_machine = be16toh(elf_head.e_machine); 1662 #else 1663 // VM is BE, shared object LE 1664 elf_head.e_machine = le16toh(elf_head.e_machine); 1665 #endif 1666 } 1667 1668 typedef struct { 1669 Elf32_Half code; // Actual value as defined in elf.h 1670 Elf32_Half compat_class; // Compatibility of archs at VM's sense 1671 unsigned char elf_class; // 32 or 64 bit 1672 unsigned char endianness; // MSB or LSB 1673 char* name; // String representation 1674 } arch_t; 1675 1676 #ifndef EM_AARCH64 1677 #define EM_AARCH64 183 /* ARM AARCH64 */ 1678 #endif 1679 #ifndef EM_RISCV 1680 #define EM_RISCV 243 /* RISC-V */ 1681 #endif 1682 #ifndef EM_LOONGARCH 1683 #define EM_LOONGARCH 258 /* LoongArch */ 1684 #endif 1685 1686 static const arch_t arch_array[]={ 1687 {EM_386, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, 1688 {EM_486, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, 1689 
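    // ('compat_class' groups machine codes that the VM treats as equivalent;
    //  that is why the EM_486 entry above maps onto EM_386.)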
{EM_IA_64, EM_IA_64, ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"}, 1690 {EM_X86_64, EM_X86_64, ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"}, 1691 {EM_SPARC, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, 1692 {EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, 1693 {EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"}, 1694 {EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"}, 1695 #if defined(VM_LITTLE_ENDIAN) 1696 {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64 LE"}, 1697 {EM_SH, EM_SH, ELFCLASS32, ELFDATA2LSB, (char*)"SuperH"}, 1698 #else 1699 {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"}, 1700 {EM_SH, EM_SH, ELFCLASS32, ELFDATA2MSB, (char*)"SuperH BE"}, 1701 #endif 1702 {EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM"}, 1703 // we only support 64 bit z architecture 1704 {EM_S390, EM_S390, ELFCLASS64, ELFDATA2MSB, (char*)"IBM System/390"}, 1705 {EM_ALPHA, EM_ALPHA, ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"}, 1706 {EM_MIPS_RS3_LE, EM_MIPS_RS3_LE, ELFCLASS32, ELFDATA2LSB, (char*)"MIPSel"}, 1707 {EM_MIPS, EM_MIPS, ELFCLASS32, ELFDATA2MSB, (char*)"MIPS"}, 1708 {EM_PARISC, EM_PARISC, ELFCLASS32, ELFDATA2MSB, (char*)"PARISC"}, 1709 {EM_68K, EM_68K, ELFCLASS32, ELFDATA2MSB, (char*)"M68k"}, 1710 {EM_AARCH64, EM_AARCH64, ELFCLASS64, ELFDATA2LSB, (char*)"AARCH64"}, 1711 {EM_RISCV, EM_RISCV, ELFCLASS64, ELFDATA2LSB, (char*)"RISC-V"}, 1712 {EM_LOONGARCH, EM_LOONGARCH, ELFCLASS64, ELFDATA2LSB, (char*)"LoongArch"}, 1713 }; 1714 1715 #if (defined IA32) 1716 static Elf32_Half running_arch_code=EM_386; 1717 #elif (defined AMD64) || (defined X32) 1718 static Elf32_Half running_arch_code=EM_X86_64; 1719 #elif (defined IA64) 1720 static Elf32_Half running_arch_code=EM_IA_64; 1721 #elif (defined __sparc) && (defined _LP64) 1722 static Elf32_Half running_arch_code=EM_SPARCV9; 1723 #elif (defined __sparc) && (!defined _LP64) 1724 static Elf32_Half running_arch_code=EM_SPARC; 1725 #elif (defined __powerpc64__) 1726 static Elf32_Half running_arch_code=EM_PPC64; 1727 #elif (defined __powerpc__) 1728 static Elf32_Half running_arch_code=EM_PPC; 1729 #elif (defined AARCH64) 1730 static Elf32_Half running_arch_code=EM_AARCH64; 1731 #elif (defined ARM) 1732 static Elf32_Half running_arch_code=EM_ARM; 1733 #elif (defined S390) 1734 static Elf32_Half running_arch_code=EM_S390; 1735 #elif (defined ALPHA) 1736 static Elf32_Half running_arch_code=EM_ALPHA; 1737 #elif (defined MIPSEL) 1738 static Elf32_Half running_arch_code=EM_MIPS_RS3_LE; 1739 #elif (defined PARISC) 1740 static Elf32_Half running_arch_code=EM_PARISC; 1741 #elif (defined MIPS) 1742 static Elf32_Half running_arch_code=EM_MIPS; 1743 #elif (defined M68K) 1744 static Elf32_Half running_arch_code=EM_68K; 1745 #elif (defined SH) 1746 static Elf32_Half running_arch_code=EM_SH; 1747 #elif (defined RISCV) 1748 static Elf32_Half running_arch_code=EM_RISCV; 1749 #elif (defined LOONGARCH) 1750 static Elf32_Half running_arch_code=EM_LOONGARCH; 1751 #else 1752 #error Method os::dll_load requires that one of following is defined:\ 1753 AARCH64, ALPHA, ARM, AMD64, IA32, IA64, LOONGARCH, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, RISCV, S390, SH, __sparc 1754 #endif 1755 1756 // Identify compatibility class for VM's architecture and library's architecture 1757 // Obtain string descriptions for architectures 1758 1759 arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL}; 1760 int running_arch_index=-1; 1761 1762 for 
(unsigned int i=0; i < ARRAY_SIZE(arch_array); i++) { 1763 if (running_arch_code == arch_array[i].code) { 1764 running_arch_index = i; 1765 } 1766 if (lib_arch.code == arch_array[i].code) { 1767 lib_arch.compat_class = arch_array[i].compat_class; 1768 lib_arch.name = arch_array[i].name; 1769 } 1770 } 1771 1772 assert(running_arch_index != -1, 1773 "Didn't find running architecture code (running_arch_code) in arch_array"); 1774 if (running_arch_index == -1) { 1775 // Even though running architecture detection failed 1776 // we may still continue with reporting dlerror() message 1777 return NULL; 1778 } 1779 1780 if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) { 1781 if (lib_arch.name != NULL) { 1782 ::snprintf(diag_msg_buf, diag_msg_max_length-1, 1783 " (Possible cause: can't load %s .so on a %s platform)", 1784 lib_arch.name, arch_array[running_arch_index].name); 1785 } else { 1786 ::snprintf(diag_msg_buf, diag_msg_max_length-1, 1787 " (Possible cause: can't load this .so (machine code=0x%x) on a %s platform)", 1788 lib_arch.code, arch_array[running_arch_index].name); 1789 } 1790 return NULL; 1791 } 1792 1793 if (lib_arch.endianness != arch_array[running_arch_index].endianness) { 1794 ::snprintf(diag_msg_buf, diag_msg_max_length-1, " (Possible cause: endianness mismatch)"); 1795 return NULL; 1796 } 1797 1798 // ELF file class/capacity : 0 - invalid, 1 - 32bit, 2 - 64bit 1799 if (lib_arch.elf_class > 2 || lib_arch.elf_class < 1) { 1800 ::snprintf(diag_msg_buf, diag_msg_max_length-1, " (Possible cause: invalid ELF file class)"); 1801 return NULL; 1802 } 1803 1804 if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) { 1805 ::snprintf(diag_msg_buf, diag_msg_max_length-1, 1806 " (Possible cause: architecture word width mismatch, can't load %d-bit .so on a %d-bit platform)", 1807 (int) lib_arch.elf_class * 32, arch_array[running_arch_index].elf_class * 32); 1808 return NULL; 1809 } 1810 1811 return NULL; 1812 } 1813 1814 void * os::Linux::dlopen_helper(const char *filename, char *ebuf, 1815 int ebuflen) { 1816 void * result = ::dlopen(filename, RTLD_LAZY); 1817 if (result == NULL) { 1818 const char* error_report = ::dlerror(); 1819 if (error_report == NULL) { 1820 error_report = "dlerror returned no error description"; 1821 } 1822 if (ebuf != NULL && ebuflen > 0) { 1823 ::strncpy(ebuf, error_report, ebuflen-1); 1824 ebuf[ebuflen-1]='\0'; 1825 } 1826 Events::log_dll_message(NULL, "Loading shared library %s failed, %s", filename, error_report); 1827 log_info(os)("shared library load of %s failed, %s", filename, error_report); 1828 } else { 1829 Events::log_dll_message(NULL, "Loaded shared library %s", filename); 1830 log_info(os)("shared library load of %s was successful", filename); 1831 } 1832 return result; 1833 } 1834 1835 void * os::Linux::dll_load_in_vmthread(const char *filename, char *ebuf, 1836 int ebuflen) { 1837 void * result = NULL; 1838 if (LoadExecStackDllInVMThread) { 1839 result = dlopen_helper(filename, ebuf, ebuflen); 1840 } 1841 1842 // Since 7019808, libjvm.so is linked with -noexecstack. If the VM loads a 1843 // library that requires an executable stack, or which does not have this 1844 // stack attribute set, dlopen changes the stack attribute to executable. The 1845 // read protection of the guard pages gets lost. 1846 // 1847 // Need to check _stack_is_executable again as multiple VM_LinuxDllLoad 1848 // may have been queued at the same time. 
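  // At this point the dlopen() above may have flipped the thread stacks to
  // executable and removed the guard page protection. If the flag is not yet
  // set (i.e. this is the first such load), re-apply the protection for all
  // Java threads whose stacks are initialized and whose guards should be on.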
1849 1850 if (!_stack_is_executable) { 1851 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { 1852 StackOverflow* overflow_state = jt->stack_overflow_state(); 1853 if (!overflow_state->stack_guard_zone_unused() && // Stack not yet fully initialized 1854 overflow_state->stack_guards_enabled()) { // No pending stack overflow exceptions 1855 if (!os::guard_memory((char *)jt->stack_end(), StackOverflow::stack_guard_zone_size())) { 1856 warning("Attempt to reguard stack yellow zone failed."); 1857 } 1858 } 1859 } 1860 } 1861 1862 return result; 1863 } 1864 1865 const char* os::Linux::dll_path(void* lib) { 1866 struct link_map *lmap; 1867 const char* l_path = NULL; 1868 assert(lib != NULL, "dll_path parameter must not be NULL"); 1869 1870 int res_dli = ::dlinfo(lib, RTLD_DI_LINKMAP, &lmap); 1871 if (res_dli == 0) { 1872 l_path = lmap->l_name; 1873 } 1874 return l_path; 1875 } 1876 1877 static bool _print_ascii_file(const char* filename, outputStream* st, const char* hdr = NULL) { 1878 int fd = ::open(filename, O_RDONLY); 1879 if (fd == -1) { 1880 return false; 1881 } 1882 1883 if (hdr != NULL) { 1884 st->print_cr("%s", hdr); 1885 } 1886 1887 char buf[33]; 1888 int bytes; 1889 buf[32] = '\0'; 1890 while ((bytes = ::read(fd, buf, sizeof(buf)-1)) > 0) { 1891 st->print_raw(buf, bytes); 1892 } 1893 1894 ::close(fd); 1895 1896 return true; 1897 } 1898 1899 static void _print_ascii_file_h(const char* header, const char* filename, outputStream* st, bool same_line = true) { 1900 st->print("%s:%c", header, same_line ? ' ' : '\n'); 1901 if (!_print_ascii_file(filename, st)) { 1902 st->print_cr("<Not Available>"); 1903 } 1904 } 1905 1906 void os::print_dll_info(outputStream *st) { 1907 st->print_cr("Dynamic libraries:"); 1908 1909 char fname[32]; 1910 pid_t pid = os::Linux::gettid(); 1911 1912 jio_snprintf(fname, sizeof(fname), "/proc/%d/maps", pid); 1913 1914 if (!_print_ascii_file(fname, st)) { 1915 st->print_cr("Can not get library information for pid = %d", pid); 1916 } 1917 } 1918 1919 struct loaded_modules_info_param { 1920 os::LoadedModulesCallbackFunc callback; 1921 void *param; 1922 }; 1923 1924 static int dl_iterate_callback(struct dl_phdr_info *info, size_t size, void *data) { 1925 if ((info->dlpi_name == NULL) || (*info->dlpi_name == '\0')) { 1926 return 0; 1927 } 1928 1929 struct loaded_modules_info_param *callback_param = reinterpret_cast<struct loaded_modules_info_param *>(data); 1930 address base = NULL; 1931 address top = NULL; 1932 for (int idx = 0; idx < info->dlpi_phnum; idx++) { 1933 const ElfW(Phdr) *phdr = info->dlpi_phdr + idx; 1934 if (phdr->p_type == PT_LOAD) { 1935 address raw_phdr_base = reinterpret_cast<address>(info->dlpi_addr + phdr->p_vaddr); 1936 1937 address phdr_base = align_down(raw_phdr_base, phdr->p_align); 1938 if ((base == NULL) || (base > phdr_base)) { 1939 base = phdr_base; 1940 } 1941 1942 address phdr_top = align_up(raw_phdr_base + phdr->p_memsz, phdr->p_align); 1943 if ((top == NULL) || (top < phdr_top)) { 1944 top = phdr_top; 1945 } 1946 } 1947 } 1948 1949 return callback_param->callback(info->dlpi_name, base, top, callback_param->param); 1950 } 1951 1952 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) { 1953 struct loaded_modules_info_param callback_param = {callback, param}; 1954 return dl_iterate_phdr(&dl_iterate_callback, &callback_param); 1955 } 1956 1957 void os::print_os_info_brief(outputStream* st) { 1958 os::Linux::print_distro_info(st); 1959 1960 os::Posix::print_uname_info(st); 1961 1962 
  os::Linux::print_libversion_info(st);
}

void os::print_os_info(outputStream* st) {
  st->print_cr("OS:");

  os::Linux::print_distro_info(st);

  os::Posix::print_uname_info(st);

  os::Linux::print_uptime_info(st);

  // Print warning if unsafe chroot environment detected
  if (unsafe_chroot_detected) {
    st->print_cr("WARNING!! %s", unstable_chroot_error);
  }

  os::Linux::print_libversion_info(st);

  os::Posix::print_rlimit_info(st);

  os::Posix::print_load_average(st);
  st->cr();

  os::Linux::print_system_memory_info(st);
  st->cr();

  os::Linux::print_process_memory_info(st);
  st->cr();

  os::Linux::print_proc_sys_info(st);
  st->cr();

  if (os::Linux::print_ld_preload_file(st)) {
    st->cr();
  }

  if (os::Linux::print_container_info(st)) {
    st->cr();
  }

  VM_Version::print_platform_virtualization_info(st);

  os::Linux::print_steal_info(st);
}

// Try to identify popular distros.
// Most Linux distributions have a /etc/XXX-release file, which contains
// the OS version string. Newer Linux distributions have a /etc/lsb-release
// file that also contains the OS version string. Some have more than one
// /etc/XXX-release file (e.g. Mandrake has both /etc/mandrake-release and
// /etc/redhat-release), so the order is important.
// Any Linux that is based on Red Hat (e.g. Oracle, Mandrake, Sun JDS...) has
// its own specific XXX-release file as well as a redhat-release file.
// Because of this, the XXX-release file needs to be searched for before the
// redhat-release file.
// Since Red Hat and SuSE have an lsb-release file that is not very descriptive,
// the search for redhat-release / SuSE-release needs to come before lsb-release.
// Since the lsb-release file is the newer standard, it needs to be searched for
// before the older style release files.
// Searching system-release (Red Hat) and os-release (other Linuxes) is a
// next-to-last resort. The os-release file is a new standard that contains
// distribution information, while the system-release file seems to be an old
// standard that has been replaced by the lsb-release and os-release files.
// Searching for the debian_version file is the last resort. It contains
// an informative string like "6.0.6" or "wheezy/sid". Because of this,
// "Debian " is printed before the contents of the debian_version file.

const char* distro_files[] = {
  "/etc/oracle-release",
  "/etc/mandriva-release",
  "/etc/mandrake-release",
  "/etc/sun-release",
  "/etc/redhat-release",
  "/etc/SuSE-release",
  "/etc/lsb-release",
  "/etc/turbolinux-release",
  "/etc/gentoo-release",
  "/etc/ltib-release",
  "/etc/angstrom-version",
  "/etc/system-release",
  "/etc/os-release",
  NULL };

void os::Linux::print_distro_info(outputStream* st) {
  for (int i = 0;; i++) {
    const char* file = distro_files[i];
    if (file == NULL) {
      break; // done
    }
    // If file prints, we found it.
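    // (_print_ascii_file() returns false if the file could not be opened.)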
    if (_print_ascii_file(file, st)) {
      return;
    }
  }

  if (file_exists("/etc/debian_version")) {
    st->print("Debian ");
    _print_ascii_file("/etc/debian_version", st);
  } else {
    st->print_cr("Linux");
  }
}

static void parse_os_info_helper(FILE* fp, char* distro, size_t length, bool get_first_line) {
  char buf[256];
  buf[0] = '\0'; // stay well-defined even if the file turns out to be empty
  while (fgets(buf, sizeof(buf), fp)) {
    // Strip the decoration from a line in the expected format.
    if (strstr(buf, "DISTRIB_DESCRIPTION=") != NULL || strstr(buf, "PRETTY_NAME=") != NULL) {
      char* ptr = strstr(buf, "\""); // the name is in quotes
      if (ptr != NULL) {
        ptr++; // go beyond first quote
        char* nl = strchr(ptr, '\"');
        if (nl != NULL) *nl = '\0';
        strncpy(distro, ptr, length);
      } else {
        ptr = strstr(buf, "=");
        ptr++; // go beyond the equals sign
        char* nl = strchr(ptr, '\n');
        if (nl != NULL) *nl = '\0';
        strncpy(distro, ptr, length);
      }
      return;
    } else if (get_first_line) {
      char* nl = strchr(buf, '\n');
      if (nl != NULL) *nl = '\0';
      strncpy(distro, buf, length);
      return;
    }
  }
  // No matching line was found: fall back to the last line read.
  char* nl = strchr(buf, '\n');
  if (nl != NULL) *nl = '\0';
  strncpy(distro, buf, length);
}

static void parse_os_info(char* distro, size_t length, const char* file) {
  FILE* fp = fopen(file, "r");
  if (fp != NULL) {
    // if suse format, print out first line
    bool get_first_line = (strcmp(file, "/etc/SuSE-release") == 0);
    parse_os_info_helper(fp, distro, length, get_first_line);
    fclose(fp);
  }
}

void os::get_summary_os_info(char* buf, size_t buflen) {
  for (int i = 0;; i++) {
    const char* file = distro_files[i];
    if (file == NULL) {
      break; // ran out of distro_files
    }
    if (file_exists(file)) {
      parse_os_info(buf, buflen, file);
      return;
    }
  }
  // special case for debian
  if (file_exists("/etc/debian_version")) {
    strncpy(buf, "Debian ", buflen);
    if (buflen > 7) {
      parse_os_info(&buf[7], buflen-7, "/etc/debian_version");
    }
  } else {
    strncpy(buf, "Linux", buflen);
  }
}

void os::Linux::print_libversion_info(outputStream* st) {
  // libc, pthread
  st->print("libc: ");
  st->print("%s ", os::Linux::libc_version());
  st->print("%s ", os::Linux::libpthread_version());
  st->cr();
}

void os::Linux::print_proc_sys_info(outputStream* st) {
  _print_ascii_file_h("/proc/sys/kernel/threads-max (system-wide limit on the number of threads)",
                      "/proc/sys/kernel/threads-max", st);
  _print_ascii_file_h("/proc/sys/vm/max_map_count (maximum number of memory map areas a process may have)",
                      "/proc/sys/vm/max_map_count", st);
  _print_ascii_file_h("/proc/sys/kernel/pid_max (system-wide limit on number of process identifiers)",
                      "/proc/sys/kernel/pid_max", st);
}

void os::Linux::print_system_memory_info(outputStream* st) {
  _print_ascii_file_h("/proc/meminfo", "/proc/meminfo", st, false);
  st->cr();

  // some information regarding THPs; for details see
  // https://www.kernel.org/doc/Documentation/vm/transhuge.txt
  _print_ascii_file_h("/sys/kernel/mm/transparent_hugepage/enabled",
                      "/sys/kernel/mm/transparent_hugepage/enabled", st);
  _print_ascii_file_h("/sys/kernel/mm/transparent_hugepage/defrag (defrag/compaction efforts parameter)",
"/sys/kernel/mm/transparent_hugepage/defrag", st); 2158 } 2159 2160 bool os::Linux::query_process_memory_info(os::Linux::meminfo_t* info) { 2161 FILE* f = ::fopen("/proc/self/status", "r"); 2162 const int num_values = sizeof(os::Linux::meminfo_t) / sizeof(size_t); 2163 int num_found = 0; 2164 char buf[256]; 2165 info->vmsize = info->vmpeak = info->vmrss = info->vmhwm = info->vmswap = 2166 info->rssanon = info->rssfile = info->rssshmem = -1; 2167 if (f != NULL) { 2168 while (::fgets(buf, sizeof(buf), f) != NULL && num_found < num_values) { 2169 if ( (info->vmsize == -1 && sscanf(buf, "VmSize: " SSIZE_FORMAT " kB", &info->vmsize) == 1) || 2170 (info->vmpeak == -1 && sscanf(buf, "VmPeak: " SSIZE_FORMAT " kB", &info->vmpeak) == 1) || 2171 (info->vmswap == -1 && sscanf(buf, "VmSwap: " SSIZE_FORMAT " kB", &info->vmswap) == 1) || 2172 (info->vmhwm == -1 && sscanf(buf, "VmHWM: " SSIZE_FORMAT " kB", &info->vmhwm) == 1) || 2173 (info->vmrss == -1 && sscanf(buf, "VmRSS: " SSIZE_FORMAT " kB", &info->vmrss) == 1) || 2174 (info->rssanon == -1 && sscanf(buf, "RssAnon: " SSIZE_FORMAT " kB", &info->rssanon) == 1) || // Needs Linux 4.5 2175 (info->rssfile == -1 && sscanf(buf, "RssFile: " SSIZE_FORMAT " kB", &info->rssfile) == 1) || // Needs Linux 4.5 2176 (info->rssshmem == -1 && sscanf(buf, "RssShmem: " SSIZE_FORMAT " kB", &info->rssshmem) == 1) // Needs Linux 4.5 2177 ) 2178 { 2179 num_found ++; 2180 } 2181 } 2182 fclose(f); 2183 return true; 2184 } 2185 return false; 2186 } 2187 2188 #ifdef __GLIBC__ 2189 // For Glibc, print a one-liner with the malloc tunables. 2190 // Most important and popular is MALLOC_ARENA_MAX, but we are 2191 // thorough and print them all. 2192 static void print_glibc_malloc_tunables(outputStream* st) { 2193 static const char* var[] = { 2194 // the new variant 2195 "GLIBC_TUNABLES", 2196 // legacy variants 2197 "MALLOC_CHECK_", "MALLOC_TOP_PAD_", "MALLOC_PERTURB_", 2198 "MALLOC_MMAP_THRESHOLD_", "MALLOC_TRIM_THRESHOLD_", 2199 "MALLOC_MMAP_MAX_", "MALLOC_ARENA_TEST", "MALLOC_ARENA_MAX", 2200 NULL}; 2201 st->print("glibc malloc tunables: "); 2202 bool printed = false; 2203 for (int i = 0; var[i] != NULL; i ++) { 2204 const char* const val = ::getenv(var[i]); 2205 if (val != NULL) { 2206 st->print("%s%s=%s", (printed ? ", " : ""), var[i], val); 2207 printed = true; 2208 } 2209 } 2210 if (!printed) { 2211 st->print("(default)"); 2212 } 2213 } 2214 #endif // __GLIBC__ 2215 2216 void os::Linux::print_process_memory_info(outputStream* st) { 2217 2218 st->print_cr("Process Memory:"); 2219 2220 // Print virtual and resident set size; peak values; swap; and for 2221 // rss its components if the kernel is recent enough. 
2222 meminfo_t info; 2223 if (query_process_memory_info(&info)) { 2224 st->print_cr("Virtual Size: " SSIZE_FORMAT "K (peak: " SSIZE_FORMAT "K)", info.vmsize, info.vmpeak); 2225 st->print("Resident Set Size: " SSIZE_FORMAT "K (peak: " SSIZE_FORMAT "K)", info.vmrss, info.vmhwm); 2226 if (info.rssanon != -1) { // requires kernel >= 4.5 2227 st->print(" (anon: " SSIZE_FORMAT "K, file: " SSIZE_FORMAT "K, shmem: " SSIZE_FORMAT "K)", 2228 info.rssanon, info.rssfile, info.rssshmem); 2229 } 2230 st->cr(); 2231 if (info.vmswap != -1) { // requires kernel >= 2.6.34 2232 st->print_cr("Swapped out: " SSIZE_FORMAT "K", info.vmswap); 2233 } 2234 } else { 2235 st->print_cr("Could not open /proc/self/status to get process memory related information"); 2236 } 2237 2238 // glibc only: 2239 // - Print outstanding allocations using mallinfo 2240 // - Print glibc tunables 2241 #ifdef __GLIBC__ 2242 size_t total_allocated = 0; 2243 size_t free_retained = 0; 2244 bool might_have_wrapped = false; 2245 if (_mallinfo2 != NULL) { 2246 struct glibc_mallinfo2 mi = _mallinfo2(); 2247 total_allocated = mi.uordblks + mi.hblkhd; 2248 free_retained = mi.fordblks; 2249 } else if (_mallinfo != NULL) { 2250 // mallinfo is an old API. Member names mean next to nothing and, beyond that, are 32-bit signed. 2251 // So for larger footprints the values may have wrapped around. We try to detect this here: if the 2252 // process whole resident set size is smaller than 4G, malloc footprint has to be less than that 2253 // and the numbers are reliable. 2254 struct glibc_mallinfo mi = _mallinfo(); 2255 total_allocated = (size_t)(unsigned)mi.uordblks + (size_t)(unsigned)mi.hblkhd; 2256 free_retained = (size_t)(unsigned)mi.fordblks; 2257 // Since mallinfo members are int, glibc values may have wrapped. Warn about this. 2258 might_have_wrapped = (info.vmrss * K) > UINT_MAX && (info.vmrss * K) > (total_allocated + UINT_MAX); 2259 } 2260 if (_mallinfo2 != NULL || _mallinfo != NULL) { 2261 st->print_cr("C-Heap outstanding allocations: " SIZE_FORMAT "K, retained: " SIZE_FORMAT "K%s", 2262 total_allocated / K, free_retained / K, 2263 might_have_wrapped ? " (may have wrapped)" : ""); 2264 } 2265 // Tunables 2266 print_glibc_malloc_tunables(st); 2267 st->cr(); 2268 #endif 2269 } 2270 2271 bool os::Linux::print_ld_preload_file(outputStream* st) { 2272 return _print_ascii_file("/etc/ld.so.preload", st, "/etc/ld.so.preload:"); 2273 } 2274 2275 void os::Linux::print_uptime_info(outputStream* st) { 2276 struct sysinfo sinfo; 2277 int ret = sysinfo(&sinfo); 2278 if (ret == 0) { 2279 os::print_dhm(st, "OS uptime:", (long) sinfo.uptime); 2280 } 2281 } 2282 2283 bool os::Linux::print_container_info(outputStream* st) { 2284 if (!OSContainer::is_containerized()) { 2285 st->print_cr("container information not found."); 2286 return false; 2287 } 2288 2289 st->print_cr("container (cgroup) information:"); 2290 2291 const char *p_ct = OSContainer::container_type(); 2292 st->print_cr("container_type: %s", p_ct != NULL ? p_ct : "not supported"); 2293 2294 char *p = OSContainer::cpu_cpuset_cpus(); 2295 st->print_cr("cpu_cpuset_cpus: %s", p != NULL ? p : "not supported"); 2296 free(p); 2297 2298 p = OSContainer::cpu_cpuset_memory_nodes(); 2299 st->print_cr("cpu_memory_nodes: %s", p != NULL ? 
p : "not supported"); 2300 free(p); 2301 2302 int i = OSContainer::active_processor_count(); 2303 st->print("active_processor_count: "); 2304 if (i > 0) { 2305 if (ActiveProcessorCount > 0) { 2306 st->print_cr("%d, but overridden by -XX:ActiveProcessorCount %d", i, ActiveProcessorCount); 2307 } else { 2308 st->print_cr("%d", i); 2309 } 2310 } else { 2311 st->print_cr("not supported"); 2312 } 2313 2314 i = OSContainer::cpu_quota(); 2315 st->print("cpu_quota: "); 2316 if (i > 0) { 2317 st->print_cr("%d", i); 2318 } else { 2319 st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no quota"); 2320 } 2321 2322 i = OSContainer::cpu_period(); 2323 st->print("cpu_period: "); 2324 if (i > 0) { 2325 st->print_cr("%d", i); 2326 } else { 2327 st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no period"); 2328 } 2329 2330 i = OSContainer::cpu_shares(); 2331 st->print("cpu_shares: "); 2332 if (i > 0) { 2333 st->print_cr("%d", i); 2334 } else { 2335 st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no shares"); 2336 } 2337 2338 OSContainer::print_container_helper(st, OSContainer::memory_limit_in_bytes(), "memory_limit_in_bytes"); 2339 OSContainer::print_container_helper(st, OSContainer::memory_and_swap_limit_in_bytes(), "memory_and_swap_limit_in_bytes"); 2340 OSContainer::print_container_helper(st, OSContainer::memory_soft_limit_in_bytes(), "memory_soft_limit_in_bytes"); 2341 OSContainer::print_container_helper(st, OSContainer::memory_usage_in_bytes(), "memory_usage_in_bytes"); 2342 OSContainer::print_container_helper(st, OSContainer::memory_max_usage_in_bytes(), "memory_max_usage_in_bytes"); 2343 2344 OSContainer::print_version_specific_info(st); 2345 2346 jlong j = OSContainer::pids_max(); 2347 st->print("maximum number of tasks: "); 2348 if (j > 0) { 2349 st->print_cr(JLONG_FORMAT, j); 2350 } else { 2351 st->print_cr("%s", j == OSCONTAINER_ERROR ? 
"not supported" : "unlimited"); 2352 } 2353 2354 j = OSContainer::pids_current(); 2355 st->print("current number of tasks: "); 2356 if (j > 0) { 2357 st->print_cr(JLONG_FORMAT, j); 2358 } else { 2359 if (j == OSCONTAINER_ERROR) { 2360 st->print_cr("not supported"); 2361 } 2362 } 2363 2364 return true; 2365 } 2366 2367 void os::Linux::print_steal_info(outputStream* st) { 2368 if (has_initial_tick_info) { 2369 CPUPerfTicks pticks; 2370 bool res = os::Linux::get_tick_information(&pticks, -1); 2371 2372 if (res && pticks.has_steal_ticks) { 2373 uint64_t steal_ticks_difference = pticks.steal - initial_steal_ticks; 2374 uint64_t total_ticks_difference = pticks.total - initial_total_ticks; 2375 double steal_ticks_perc = 0.0; 2376 if (total_ticks_difference != 0) { 2377 steal_ticks_perc = (double) steal_ticks_difference / total_ticks_difference; 2378 } 2379 st->print_cr("Steal ticks since vm start: " UINT64_FORMAT, steal_ticks_difference); 2380 st->print_cr("Steal ticks percentage since vm start:%7.3f", steal_ticks_perc); 2381 } 2382 } 2383 } 2384 2385 void os::print_memory_info(outputStream* st) { 2386 2387 st->print("Memory:"); 2388 st->print(" %dk page", os::vm_page_size()>>10); 2389 2390 // values in struct sysinfo are "unsigned long" 2391 struct sysinfo si; 2392 sysinfo(&si); 2393 2394 st->print(", physical " UINT64_FORMAT "k", 2395 os::physical_memory() >> 10); 2396 st->print("(" UINT64_FORMAT "k free)", 2397 os::available_memory() >> 10); 2398 st->print(", swap " UINT64_FORMAT "k", 2399 ((jlong)si.totalswap * si.mem_unit) >> 10); 2400 st->print("(" UINT64_FORMAT "k free)", 2401 ((jlong)si.freeswap * si.mem_unit) >> 10); 2402 st->cr(); 2403 st->print("Page Sizes: "); 2404 _page_sizes.print_on(st); 2405 st->cr(); 2406 } 2407 2408 // Print the first "model name" line and the first "flags" line 2409 // that we find and nothing more. We assume "model name" comes 2410 // before "flags" so if we find a second "model name", then the 2411 // "flags" field is considered missing. 2412 static bool print_model_name_and_flags(outputStream* st, char* buf, size_t buflen) { 2413 #if defined(IA32) || defined(AMD64) 2414 // Other platforms have less repetitive cpuinfo files 2415 FILE *fp = fopen("/proc/cpuinfo", "r"); 2416 if (fp) { 2417 bool model_name_printed = false; 2418 while (!feof(fp)) { 2419 if (fgets(buf, buflen, fp)) { 2420 // Assume model name comes before flags 2421 if (strstr(buf, "model name") != NULL) { 2422 if (!model_name_printed) { 2423 st->print_raw("CPU Model and flags from /proc/cpuinfo:\n"); 2424 st->print_raw(buf); 2425 model_name_printed = true; 2426 } else { 2427 // model name printed but not flags? Odd, just return 2428 fclose(fp); 2429 return true; 2430 } 2431 } 2432 // print the flags line too 2433 if (strstr(buf, "flags") != NULL) { 2434 st->print_raw(buf); 2435 fclose(fp); 2436 return true; 2437 } 2438 } 2439 } 2440 fclose(fp); 2441 } 2442 #endif // x86 platforms 2443 return false; 2444 } 2445 2446 // additional information about CPU e.g. available frequency ranges 2447 static void print_sys_devices_cpu_info(outputStream* st, char* buf, size_t buflen) { 2448 _print_ascii_file_h("Online cpus", "/sys/devices/system/cpu/online", st); 2449 _print_ascii_file_h("Offline cpus", "/sys/devices/system/cpu/offline", st); 2450 2451 if (ExtensiveErrorReports) { 2452 // cache related info (cpu 0, should be similar for other CPUs) 2453 for (unsigned int i=0; i < 10; i++) { // handle max. 
10 cache entries 2454 char hbuf_level[60]; 2455 char hbuf_type[60]; 2456 char hbuf_size[60]; 2457 char hbuf_coherency_line_size[80]; 2458 snprintf(hbuf_level, 60, "/sys/devices/system/cpu/cpu0/cache/index%u/level", i); 2459 snprintf(hbuf_type, 60, "/sys/devices/system/cpu/cpu0/cache/index%u/type", i); 2460 snprintf(hbuf_size, 60, "/sys/devices/system/cpu/cpu0/cache/index%u/size", i); 2461 snprintf(hbuf_coherency_line_size, 80, "/sys/devices/system/cpu/cpu0/cache/index%u/coherency_line_size", i); 2462 if (file_exists(hbuf_level)) { 2463 _print_ascii_file_h("cache level", hbuf_level, st); 2464 _print_ascii_file_h("cache type", hbuf_type, st); 2465 _print_ascii_file_h("cache size", hbuf_size, st); 2466 _print_ascii_file_h("cache coherency line size", hbuf_coherency_line_size, st); 2467 } 2468 } 2469 } 2470 2471 // we miss the cpufreq entries on Power and s390x 2472 #if defined(IA32) || defined(AMD64) 2473 _print_ascii_file_h("BIOS frequency limitation", "/sys/devices/system/cpu/cpu0/cpufreq/bios_limit", st); 2474 _print_ascii_file_h("Frequency switch latency (ns)", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_transition_latency", st); 2475 _print_ascii_file_h("Available cpu frequencies", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies", st); 2476 // min and max should be in the Available range but still print them (not all info might be available for all kernels) 2477 if (ExtensiveErrorReports) { 2478 _print_ascii_file_h("Maximum cpu frequency", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", st); 2479 _print_ascii_file_h("Minimum cpu frequency", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_min_freq", st); 2480 _print_ascii_file_h("Current cpu frequency", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq", st); 2481 } 2482 // governors are power schemes, see https://wiki.archlinux.org/index.php/CPU_frequency_scaling 2483 if (ExtensiveErrorReports) { 2484 _print_ascii_file_h("Available governors", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors", st); 2485 } 2486 _print_ascii_file_h("Current governor", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", st); 2487 // Core performance boost, see https://www.kernel.org/doc/Documentation/cpu-freq/boost.txt 2488 // Raise operating frequency of some cores in a multi-core package if certain conditions apply, e.g. 2489 // whole chip is not fully utilized 2490 _print_ascii_file_h("Core performance/turbo boost", "/sys/devices/system/cpu/cpufreq/boost", st); 2491 #endif 2492 } 2493 2494 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) { 2495 // Only print the model name if the platform provides this as a summary 2496 if (!print_model_name_and_flags(st, buf, buflen)) { 2497 _print_ascii_file_h("/proc/cpuinfo", "/proc/cpuinfo", st, false); 2498 } 2499 st->cr(); 2500 print_sys_devices_cpu_info(st, buf, buflen); 2501 } 2502 2503 #if defined(AMD64) || defined(IA32) || defined(X32) 2504 const char* search_string = "model name"; 2505 #elif defined(M68K) 2506 const char* search_string = "CPU"; 2507 #elif defined(PPC64) 2508 const char* search_string = "cpu"; 2509 #elif defined(S390) 2510 const char* search_string = "machine ="; 2511 #elif defined(SPARC) 2512 const char* search_string = "cpu"; 2513 #else 2514 const char* search_string = "Processor"; 2515 #endif 2516 2517 // Parses the cpuinfo file for string representing the model name. 
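// A minimal caller-side sketch (the buffer name is illustrative):
//   char model[256];
//   os::get_summary_cpu_info(model, sizeof(model));
//   // 'model' now holds e.g. the "model name" value from /proc/cpuinfo, or a
//   // generic architecture string such as "x86_64" if parsing failed.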
2518 void os::get_summary_cpu_info(char* cpuinfo, size_t length) { 2519 FILE* fp = fopen("/proc/cpuinfo", "r"); 2520 if (fp != NULL) { 2521 while (!feof(fp)) { 2522 char buf[256]; 2523 if (fgets(buf, sizeof(buf), fp)) { 2524 char* start = strstr(buf, search_string); 2525 if (start != NULL) { 2526 char *ptr = start + strlen(search_string); 2527 char *end = buf + strlen(buf); 2528 while (ptr != end) { 2529 // skip whitespace and colon for the rest of the name. 2530 if (*ptr != ' ' && *ptr != '\t' && *ptr != ':') { 2531 break; 2532 } 2533 ptr++; 2534 } 2535 if (ptr != end) { 2536 // reasonable string, get rid of newline and keep the rest 2537 char* nl = strchr(buf, '\n'); 2538 if (nl != NULL) *nl = '\0'; 2539 strncpy(cpuinfo, ptr, length); 2540 fclose(fp); 2541 return; 2542 } 2543 } 2544 } 2545 } 2546 fclose(fp); 2547 } 2548 // cpuinfo not found or parsing failed, just print generic string. The entire 2549 // /proc/cpuinfo file will be printed later in the file (or enough of it for x86) 2550 #if defined(AARCH64) 2551 strncpy(cpuinfo, "AArch64", length); 2552 #elif defined(AMD64) 2553 strncpy(cpuinfo, "x86_64", length); 2554 #elif defined(ARM) // Order wrt. AARCH64 is relevant! 2555 strncpy(cpuinfo, "ARM", length); 2556 #elif defined(IA32) 2557 strncpy(cpuinfo, "x86_32", length); 2558 #elif defined(IA64) 2559 strncpy(cpuinfo, "IA64", length); 2560 #elif defined(PPC) 2561 strncpy(cpuinfo, "PPC64", length); 2562 #elif defined(S390) 2563 strncpy(cpuinfo, "S390", length); 2564 #elif defined(SPARC) 2565 strncpy(cpuinfo, "sparcv9", length); 2566 #elif defined(ZERO_LIBARCH) 2567 strncpy(cpuinfo, ZERO_LIBARCH, length); 2568 #else 2569 strncpy(cpuinfo, "unknown", length); 2570 #endif 2571 } 2572 2573 static char saved_jvm_path[MAXPATHLEN] = {0}; 2574 2575 // Find the full path to the current module, libjvm.so 2576 void os::jvm_path(char *buf, jint buflen) { 2577 // Error checking. 2578 if (buflen < MAXPATHLEN) { 2579 assert(false, "must use a large-enough buffer"); 2580 buf[0] = '\0'; 2581 return; 2582 } 2583 // Lazy resolve the path to current module. 2584 if (saved_jvm_path[0] != 0) { 2585 strcpy(buf, saved_jvm_path); 2586 return; 2587 } 2588 2589 char dli_fname[MAXPATHLEN]; 2590 dli_fname[0] = '\0'; 2591 bool ret = dll_address_to_library_name( 2592 CAST_FROM_FN_PTR(address, os::jvm_path), 2593 dli_fname, sizeof(dli_fname), NULL); 2594 assert(ret, "cannot locate libjvm"); 2595 char *rp = NULL; 2596 if (ret && dli_fname[0] != '\0') { 2597 rp = os::Posix::realpath(dli_fname, buf, buflen); 2598 } 2599 if (rp == NULL) { 2600 return; 2601 } 2602 2603 if (Arguments::sun_java_launcher_is_altjvm()) { 2604 // Support for the java launcher's '-XXaltjvm=<path>' option. Typical 2605 // value for buf is "<JAVA_HOME>/jre/lib/<vmtype>/libjvm.so". 2606 // If "/jre/lib/" appears at the right place in the string, then 2607 // assume we are installed in a JDK and we're done. Otherwise, check 2608 // for a JAVA_HOME environment variable and fix up the path so it 2609 // looks like libjvm.so is installed there (append a fake suffix 2610 // hotspot/libjvm.so). 2611 const char *p = buf + strlen(buf) - 1; 2612 for (int count = 0; p > buf && count < 5; ++count) { 2613 for (--p; p > buf && *p != '/'; --p) 2614 /* empty */ ; 2615 } 2616 2617 if (strncmp(p, "/jre/lib/", 9) != 0) { 2618 // Look for JAVA_HOME in the environment. 2619 char* java_home_var = ::getenv("JAVA_HOME"); 2620 if (java_home_var != NULL && java_home_var[0] != 0) { 2621 char* jrelib_p; 2622 int len; 2623 2624 // Check the current module name "libjvm.so". 
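      // (strrchr() locates the last path component; the assert below then
      //  verifies it is the libjvm library before 'buf' is reused for the
      //  JAVA_HOME path.)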
2625 p = strrchr(buf, '/'); 2626 if (p == NULL) { 2627 return; 2628 } 2629 assert(strstr(p, "/libjvm") == p, "invalid library name"); 2630 2631 rp = os::Posix::realpath(java_home_var, buf, buflen); 2632 if (rp == NULL) { 2633 return; 2634 } 2635 2636 // determine if this is a legacy image or modules image 2637 // modules image doesn't have "jre" subdirectory 2638 len = strlen(buf); 2639 assert(len < buflen, "Ran out of buffer room"); 2640 jrelib_p = buf + len; 2641 snprintf(jrelib_p, buflen-len, "/jre/lib"); 2642 if (0 != access(buf, F_OK)) { 2643 snprintf(jrelib_p, buflen-len, "/lib"); 2644 } 2645 2646 if (0 == access(buf, F_OK)) { 2647 // Use current module name "libjvm.so" 2648 len = strlen(buf); 2649 snprintf(buf + len, buflen-len, "/hotspot/libjvm.so"); 2650 } else { 2651 // Go back to path of .so 2652 rp = os::Posix::realpath(dli_fname, buf, buflen); 2653 if (rp == NULL) { 2654 return; 2655 } 2656 } 2657 } 2658 } 2659 } 2660 2661 strncpy(saved_jvm_path, buf, MAXPATHLEN); 2662 saved_jvm_path[MAXPATHLEN - 1] = '\0'; 2663 } 2664 2665 void os::print_jni_name_prefix_on(outputStream* st, int args_size) { 2666 // no prefix required, not even "_" 2667 } 2668 2669 void os::print_jni_name_suffix_on(outputStream* st, int args_size) { 2670 // no suffix required 2671 } 2672 2673 //////////////////////////////////////////////////////////////////////////////// 2674 // Virtual Memory 2675 2676 int os::vm_page_size() { 2677 // Seems redundant as all get out 2678 assert(os::Linux::page_size() != -1, "must call os::init"); 2679 return os::Linux::page_size(); 2680 } 2681 2682 // Solaris allocates memory by pages. 2683 int os::vm_allocation_granularity() { 2684 assert(os::Linux::page_size() != -1, "must call os::init"); 2685 return os::Linux::page_size(); 2686 } 2687 2688 // Rationale behind this function: 2689 // current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without executable 2690 // mapping for address (see lookup_dcookie() in the kernel module), thus we cannot get 2691 // samples for JITted code. Here we create private executable mapping over the code cache 2692 // and then we can use standard (well, almost, as mapping can change) way to provide 2693 // info for the reporting script by storing timestamp and location of symbol 2694 void linux_wrap_code(char* base, size_t size) { 2695 static volatile jint cnt = 0; 2696 2697 if (!UseOprofile) { 2698 return; 2699 } 2700 2701 char buf[PATH_MAX+1]; 2702 int num = Atomic::add(&cnt, 1); 2703 2704 snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d", 2705 os::get_temp_directory(), os::current_process_id(), num); 2706 unlink(buf); 2707 2708 int fd = ::open(buf, O_CREAT | O_RDWR, S_IRWXU); 2709 2710 if (fd != -1) { 2711 off_t rv = ::lseek(fd, size-2, SEEK_SET); 2712 if (rv != (off_t)-1) { 2713 if (::write(fd, "", 1) == 1) { 2714 mmap(base, size, 2715 PROT_READ|PROT_WRITE|PROT_EXEC, 2716 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0); 2717 } 2718 } 2719 ::close(fd); 2720 unlink(buf); 2721 } 2722 } 2723 2724 static bool recoverable_mmap_error(int err) { 2725 // See if the error is one we can let the caller handle. This 2726 // list of errno values comes from JBS-6843484. I can't find a 2727 // Linux man page that documents this specific set of errno 2728 // values so while this list currently matches Solaris, it may 2729 // change as we gain experience with this failure mode. 
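  // EBADF, EINVAL and ENOTSUP indicate a usage problem rather than resource
  // exhaustion, so the reserved mapping is still intact and the caller can
  // recover. Anything else may mean the mapping was lost (see below).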
2730 switch (err) { 2731 case EBADF: 2732 case EINVAL: 2733 case ENOTSUP: 2734 // let the caller deal with these errors 2735 return true; 2736 2737 default: 2738 // Any remaining errors on this OS can cause our reserved mapping 2739 // to be lost. That can cause confusion where different data 2740 // structures think they have the same memory mapped. The worst 2741 // scenario is if both the VM and a library think they have the 2742 // same memory mapped. 2743 return false; 2744 } 2745 } 2746 2747 static void warn_fail_commit_memory(char* addr, size_t size, bool exec, 2748 int err) { 2749 warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT 2750 ", %d) failed; error='%s' (errno=%d)", p2i(addr), size, exec, 2751 os::strerror(err), err); 2752 } 2753 2754 static void warn_fail_commit_memory(char* addr, size_t size, 2755 size_t alignment_hint, bool exec, 2756 int err) { 2757 warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT 2758 ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", p2i(addr), size, 2759 alignment_hint, exec, os::strerror(err), err); 2760 } 2761 2762 // NOTE: Linux kernel does not really reserve the pages for us. 2763 // All it does is to check if there are enough free pages 2764 // left at the time of mmap(). This could be a potential 2765 // problem. 2766 int os::Linux::commit_memory_impl(char* addr, size_t size, bool exec) { 2767 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; 2768 uintptr_t res = (uintptr_t) ::mmap(addr, size, prot, 2769 MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0); 2770 if (res != (uintptr_t) MAP_FAILED) { 2771 if (UseNUMAInterleaving) { 2772 numa_make_global(addr, size); 2773 } 2774 return 0; 2775 } 2776 2777 int err = errno; // save errno from mmap() call above 2778 2779 if (!recoverable_mmap_error(err)) { 2780 warn_fail_commit_memory(addr, size, exec, err); 2781 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "committing reserved memory."); 2782 } 2783 2784 return err; 2785 } 2786 2787 bool os::pd_commit_memory(char* addr, size_t size, bool exec) { 2788 return os::Linux::commit_memory_impl(addr, size, exec) == 0; 2789 } 2790 2791 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec, 2792 const char* mesg) { 2793 assert(mesg != NULL, "mesg must be specified"); 2794 int err = os::Linux::commit_memory_impl(addr, size, exec); 2795 if (err != 0) { 2796 // the caller wants all commit errors to exit with the specified mesg: 2797 warn_fail_commit_memory(addr, size, exec, err); 2798 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg); 2799 } 2800 } 2801 2802 // Define MAP_HUGETLB here so we can build HotSpot on old systems. 2803 #ifndef MAP_HUGETLB 2804 #define MAP_HUGETLB 0x40000 2805 #endif 2806 2807 // If mmap flags are set with MAP_HUGETLB and the system supports multiple 2808 // huge page sizes, flag bits [26:31] can be used to encode the log2 of the 2809 // desired huge page size. Otherwise, the system's default huge page size will be used. 2810 // See mmap(2) man page for more info (since Linux 3.8). 2811 // https://lwn.net/Articles/533499/ 2812 #ifndef MAP_HUGE_SHIFT 2813 #define MAP_HUGE_SHIFT 26 2814 #endif 2815 2816 // Define MADV_HUGEPAGE here so we can build HotSpot on old systems. 
#ifndef MADV_HUGEPAGE
  #define MADV_HUGEPAGE 14
#endif

int os::Linux::commit_memory_impl(char* addr, size_t size,
                                  size_t alignment_hint, bool exec) {
  int err = os::Linux::commit_memory_impl(addr, size, exec);
  if (err == 0) {
    realign_memory(addr, size, alignment_hint);
  }
  return err;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  return os::Linux::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Linux::commit_memory_impl(addr, size, alignment_hint, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
  }
}

void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) {
    // We don't check the return value: madvise(MADV_HUGEPAGE) may not
    // be supported or the memory may already be backed by huge pages.
    ::madvise(addr, bytes, MADV_HUGEPAGE);
  }
}

void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
  // This method works by doing an mmap over an existing mapping, effectively
  // discarding the existing pages. However, it won't work for SHM-based large
  // pages, which cannot be uncommitted at all. We don't do anything in that
  // case to avoid creating a segment with small pages on top of the SHM
  // segment. This method always works for small pages, so we allow that in
  // any case.
  if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) {
    commit_memory(addr, bytes, alignment_hint, !ExecMem);
  }
}

void os::numa_make_global(char *addr, size_t bytes) {
  Linux::numa_interleave_memory(addr, bytes);
}

// Define for numa_set_bind_policy(int). Setting the argument to 0 will set the
// bind policy to MPOL_PREFERRED for the current thread.
#define USE_MPOL_PREFERRED 0

void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
  // To make NUMA and large pages more robust when both enabled, we need to ease
  // the requirements on where the memory should be allocated. MPOL_BIND is the
  // default policy and it will force memory to be allocated on the specified
  // node. Changing this to MPOL_PREFERRED will prefer to allocate the memory on
  // the specified node, but will not force it. Using this policy will prevent
  // getting SIGBUS when trying to allocate large pages on NUMA nodes with no
  // free large pages.
  Linux::numa_set_bind_policy(USE_MPOL_PREFERRED);
  Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
}

bool os::numa_topology_changed() { return false; }

size_t os::numa_get_groups_num() {
  // Return just the number of nodes in which it's possible to allocate memory
  // (in numa terminology, configured nodes).
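  // (By contrast, get_existing_num_nodes() below also counts nodes without
  //  memory.)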
2891 return Linux::numa_num_configured_nodes(); 2892 } 2893 2894 int os::numa_get_group_id() { 2895 int cpu_id = Linux::sched_getcpu(); 2896 if (cpu_id != -1) { 2897 int lgrp_id = Linux::get_node_by_cpu(cpu_id); 2898 if (lgrp_id != -1) { 2899 return lgrp_id; 2900 } 2901 } 2902 return 0; 2903 } 2904 2905 int os::numa_get_group_id_for_address(const void* address) { 2906 void** pages = const_cast<void**>(&address); 2907 int id = -1; 2908 2909 if (os::Linux::numa_move_pages(0, 1, pages, NULL, &id, 0) == -1) { 2910 return -1; 2911 } 2912 if (id < 0) { 2913 return -1; 2914 } 2915 return id; 2916 } 2917 2918 int os::Linux::get_existing_num_nodes() { 2919 int node; 2920 int highest_node_number = Linux::numa_max_node(); 2921 int num_nodes = 0; 2922 2923 // Get the total number of nodes in the system including nodes without memory. 2924 for (node = 0; node <= highest_node_number; node++) { 2925 if (is_node_in_existing_nodes(node)) { 2926 num_nodes++; 2927 } 2928 } 2929 return num_nodes; 2930 } 2931 2932 size_t os::numa_get_leaf_groups(int *ids, size_t size) { 2933 int highest_node_number = Linux::numa_max_node(); 2934 size_t i = 0; 2935 2936 // Map all node ids in which it is possible to allocate memory. Also nodes are 2937 // not always consecutively available, i.e. available from 0 to the highest 2938 // node number. If the nodes have been bound explicitly using numactl membind, 2939 // then allocate memory from those nodes only. 2940 for (int node = 0; node <= highest_node_number; node++) { 2941 if (Linux::is_node_in_bound_nodes((unsigned int)node)) { 2942 ids[i++] = node; 2943 } 2944 } 2945 return i; 2946 } 2947 2948 bool os::get_page_info(char *start, page_info* info) { 2949 return false; 2950 } 2951 2952 char *os::scan_pages(char *start, char* end, page_info* page_expected, 2953 page_info* page_found) { 2954 return end; 2955 } 2956 2957 2958 int os::Linux::sched_getcpu_syscall(void) { 2959 unsigned int cpu = 0; 2960 int retval = -1; 2961 2962 #if defined(IA32) 2963 #ifndef SYS_getcpu 2964 #define SYS_getcpu 318 2965 #endif 2966 retval = syscall(SYS_getcpu, &cpu, NULL, NULL); 2967 #elif defined(AMD64) 2968 // Unfortunately we have to bring all these macros here from vsyscall.h 2969 // to be able to compile on old linuxes. 2970 #define __NR_vgetcpu 2 2971 #define VSYSCALL_START (-10UL << 20) 2972 #define VSYSCALL_SIZE 1024 2973 #define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr)) 2974 typedef long (*vgetcpu_t)(unsigned int *cpu, unsigned int *node, unsigned long *tcache); 2975 vgetcpu_t vgetcpu = (vgetcpu_t)VSYSCALL_ADDR(__NR_vgetcpu); 2976 retval = vgetcpu(&cpu, NULL, NULL); 2977 #endif 2978 2979 return (retval == -1) ? retval : cpu; 2980 } 2981 2982 void os::Linux::sched_getcpu_init() { 2983 // sched_getcpu() should be in libc. 2984 set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t, 2985 dlsym(RTLD_DEFAULT, "sched_getcpu"))); 2986 2987 // If it's not, try a direct syscall. 2988 if (sched_getcpu() == -1) { 2989 set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t, 2990 (void*)&sched_getcpu_syscall)); 2991 } 2992 2993 if (sched_getcpu() == -1) { 2994 vm_exit_during_initialization("getcpu(2) system call not supported by kernel"); 2995 } 2996 } 2997 2998 // Something to do with the numa-aware allocator needs these symbols 2999 extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { } 3000 extern "C" JNIEXPORT void numa_error(char *where) { } 3001 3002 // Handle request to load libnuma symbol version 1.1 (API v1). 
If it fails 3003 // load symbol from base version instead. 3004 void* os::Linux::libnuma_dlsym(void* handle, const char *name) { 3005 void *f = dlvsym(handle, name, "libnuma_1.1"); 3006 if (f == NULL) { 3007 f = dlsym(handle, name); 3008 } 3009 return f; 3010 } 3011 3012 // Handle request to load libnuma symbol version 1.2 (API v2) only. 3013 // Return NULL if the symbol is not defined in this particular version. 3014 void* os::Linux::libnuma_v2_dlsym(void* handle, const char* name) { 3015 return dlvsym(handle, name, "libnuma_1.2"); 3016 } 3017 3018 // Check numa dependent syscalls 3019 static bool numa_syscall_check() { 3020 // NUMA APIs depend on several syscalls. E.g., get_mempolicy is required for numa_get_membind and 3021 // numa_get_interleave_mask. But these dependent syscalls can be unsupported for various reasons. 3022 // Especially in dockers, get_mempolicy is not allowed with the default configuration. So it's necessary 3023 // to check whether the syscalls are available. Currently, only get_mempolicy is checked since checking 3024 // others like mbind would cause unexpected side effects. 3025 #ifdef SYS_get_mempolicy 3026 int dummy = 0; 3027 if (syscall(SYS_get_mempolicy, &dummy, NULL, 0, (void*)&dummy, 3) == -1) { 3028 return false; 3029 } 3030 #endif 3031 3032 return true; 3033 } 3034 3035 bool os::Linux::libnuma_init() { 3036 // Requires sched_getcpu() and numa dependent syscalls support 3037 if ((sched_getcpu() != -1) && numa_syscall_check()) { 3038 void *handle = dlopen("libnuma.so.1", RTLD_LAZY); 3039 if (handle != NULL) { 3040 set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t, 3041 libnuma_dlsym(handle, "numa_node_to_cpus"))); 3042 set_numa_node_to_cpus_v2(CAST_TO_FN_PTR(numa_node_to_cpus_v2_func_t, 3043 libnuma_v2_dlsym(handle, "numa_node_to_cpus"))); 3044 set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t, 3045 libnuma_dlsym(handle, "numa_max_node"))); 3046 set_numa_num_configured_nodes(CAST_TO_FN_PTR(numa_num_configured_nodes_func_t, 3047 libnuma_dlsym(handle, "numa_num_configured_nodes"))); 3048 set_numa_available(CAST_TO_FN_PTR(numa_available_func_t, 3049 libnuma_dlsym(handle, "numa_available"))); 3050 set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t, 3051 libnuma_dlsym(handle, "numa_tonode_memory"))); 3052 set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t, 3053 libnuma_dlsym(handle, "numa_interleave_memory"))); 3054 set_numa_interleave_memory_v2(CAST_TO_FN_PTR(numa_interleave_memory_v2_func_t, 3055 libnuma_v2_dlsym(handle, "numa_interleave_memory"))); 3056 set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t, 3057 libnuma_dlsym(handle, "numa_set_bind_policy"))); 3058 set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t, 3059 libnuma_dlsym(handle, "numa_bitmask_isbitset"))); 3060 set_numa_distance(CAST_TO_FN_PTR(numa_distance_func_t, 3061 libnuma_dlsym(handle, "numa_distance"))); 3062 set_numa_get_membind(CAST_TO_FN_PTR(numa_get_membind_func_t, 3063 libnuma_v2_dlsym(handle, "numa_get_membind"))); 3064 set_numa_get_interleave_mask(CAST_TO_FN_PTR(numa_get_interleave_mask_func_t, 3065 libnuma_v2_dlsym(handle, "numa_get_interleave_mask"))); 3066 set_numa_move_pages(CAST_TO_FN_PTR(numa_move_pages_func_t, 3067 libnuma_dlsym(handle, "numa_move_pages"))); 3068 set_numa_set_preferred(CAST_TO_FN_PTR(numa_set_preferred_func_t, 3069 libnuma_dlsym(handle, "numa_set_preferred"))); 3070 3071 if (numa_available() != -1) { 3072 set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, 
"numa_all_nodes")); 3073 set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr")); 3074 set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr")); 3075 set_numa_interleave_bitmask(_numa_get_interleave_mask()); 3076 set_numa_membind_bitmask(_numa_get_membind()); 3077 // Create an index -> node mapping, since nodes are not always consecutive 3078 _nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, mtInternal); 3079 rebuild_nindex_to_node_map(); 3080 // Create a cpu -> node mapping 3081 _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, mtInternal); 3082 rebuild_cpu_to_node_map(); 3083 return true; 3084 } 3085 } 3086 } 3087 return false; 3088 } 3089 3090 size_t os::Linux::default_guard_size(os::ThreadType thr_type) { 3091 // Creating guard page is very expensive. Java thread has HotSpot 3092 // guard pages, only enable glibc guard page for non-Java threads. 3093 // (Remember: compiler thread is a Java thread, too!) 3094 return ((thr_type == java_thread || thr_type == compiler_thread) ? 0 : page_size()); 3095 } 3096 3097 void os::Linux::rebuild_nindex_to_node_map() { 3098 int highest_node_number = Linux::numa_max_node(); 3099 3100 nindex_to_node()->clear(); 3101 for (int node = 0; node <= highest_node_number; node++) { 3102 if (Linux::is_node_in_existing_nodes(node)) { 3103 nindex_to_node()->append(node); 3104 } 3105 } 3106 } 3107 3108 // rebuild_cpu_to_node_map() constructs a table mapping cpud id to node id. 3109 // The table is later used in get_node_by_cpu(). 3110 void os::Linux::rebuild_cpu_to_node_map() { 3111 const size_t NCPUS = 32768; // Since the buffer size computation is very obscure 3112 // in libnuma (possible values are starting from 16, 3113 // and continuing up with every other power of 2, but less 3114 // than the maximum number of CPUs supported by kernel), and 3115 // is a subject to change (in libnuma version 2 the requirements 3116 // are more reasonable) we'll just hardcode the number they use 3117 // in the library. 3118 const size_t BitsPerCLong = sizeof(long) * CHAR_BIT; 3119 3120 size_t cpu_num = processor_count(); 3121 size_t cpu_map_size = NCPUS / BitsPerCLong; 3122 size_t cpu_map_valid_size = 3123 MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size); 3124 3125 cpu_to_node()->clear(); 3126 cpu_to_node()->at_grow(cpu_num - 1); 3127 3128 size_t node_num = get_existing_num_nodes(); 3129 3130 int distance = 0; 3131 int closest_distance = INT_MAX; 3132 int closest_node = 0; 3133 unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size, mtInternal); 3134 for (size_t i = 0; i < node_num; i++) { 3135 // Check if node is configured (not a memory-less node). If it is not, find 3136 // the closest configured node. Check also if node is bound, i.e. it's allowed 3137 // to allocate memory from the node. If it's not allowed, map cpus in that node 3138 // to the closest node from which memory allocation is allowed. 3139 if (!is_node_in_configured_nodes(nindex_to_node()->at(i)) || 3140 !is_node_in_bound_nodes(nindex_to_node()->at(i))) { 3141 closest_distance = INT_MAX; 3142 // Check distance from all remaining nodes in the system. Ignore distance 3143 // from itself, from another non-configured node, and from another non-bound 3144 // node. 
3145 for (size_t m = 0; m < node_num; m++) { 3146 if (m != i && 3147 is_node_in_configured_nodes(nindex_to_node()->at(m)) && 3148 is_node_in_bound_nodes(nindex_to_node()->at(m))) { 3149 distance = numa_distance(nindex_to_node()->at(i), nindex_to_node()->at(m)); 3150 // If a closest node is found, update. There is always at least one 3151 // configured and bound node in the system so there is always at least 3152 // one node close. 3153 if (distance != 0 && distance < closest_distance) { 3154 closest_distance = distance; 3155 closest_node = nindex_to_node()->at(m); 3156 } 3157 } 3158 } 3159 } else { 3160 // Current node is already a configured node. 3161 closest_node = nindex_to_node()->at(i); 3162 } 3163 3164 // Get cpus from the original node and map them to the closest node. If node 3165 // is a configured node (not a memory-less node), then original node and 3166 // closest node are the same. 3167 if (numa_node_to_cpus(nindex_to_node()->at(i), cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) { 3168 for (size_t j = 0; j < cpu_map_valid_size; j++) { 3169 if (cpu_map[j] != 0) { 3170 for (size_t k = 0; k < BitsPerCLong; k++) { 3171 if (cpu_map[j] & (1UL << k)) { 3172 int cpu_index = j * BitsPerCLong + k; 3173 3174 #ifndef PRODUCT 3175 if (UseDebuggerErgo1 && cpu_index >= (int)cpu_num) { 3176 // Some debuggers limit the processor count without 3177 // intercepting the NUMA APIs. Just fake the values. 3178 cpu_index = 0; 3179 } 3180 #endif 3181 3182 cpu_to_node()->at_put(cpu_index, closest_node); 3183 } 3184 } 3185 } 3186 } 3187 } 3188 } 3189 FREE_C_HEAP_ARRAY(unsigned long, cpu_map); 3190 } 3191 3192 int os::Linux::numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) { 3193 // use the latest version of numa_node_to_cpus if available 3194 if (_numa_node_to_cpus_v2 != NULL) { 3195 3196 // libnuma bitmask struct 3197 struct bitmask { 3198 unsigned long size; /* number of bits in the map */ 3199 unsigned long *maskp; 3200 }; 3201 3202 struct bitmask mask; 3203 mask.maskp = (unsigned long *)buffer; 3204 mask.size = bufferlen * 8; 3205 return _numa_node_to_cpus_v2(node, &mask); 3206 } else if (_numa_node_to_cpus != NULL) { 3207 return _numa_node_to_cpus(node, buffer, bufferlen); 3208 } 3209 return -1; 3210 } 3211 3212 int os::Linux::get_node_by_cpu(int cpu_id) { 3213 if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) { 3214 return cpu_to_node()->at(cpu_id); 3215 } 3216 return -1; 3217 } 3218 3219 GrowableArray<int>* os::Linux::_cpu_to_node; 3220 GrowableArray<int>* os::Linux::_nindex_to_node; 3221 os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu; 3222 os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus; 3223 os::Linux::numa_node_to_cpus_v2_func_t os::Linux::_numa_node_to_cpus_v2; 3224 os::Linux::numa_max_node_func_t os::Linux::_numa_max_node; 3225 os::Linux::numa_num_configured_nodes_func_t os::Linux::_numa_num_configured_nodes; 3226 os::Linux::numa_available_func_t os::Linux::_numa_available; 3227 os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory; 3228 os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory; 3229 os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2; 3230 os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy; 3231 os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset; 3232 os::Linux::numa_distance_func_t os::Linux::_numa_distance; 3233 os::Linux::numa_get_membind_func_t os::Linux::_numa_get_membind; 3234 
os::Linux::numa_get_interleave_mask_func_t os::Linux::_numa_get_interleave_mask; 3235 os::Linux::numa_move_pages_func_t os::Linux::_numa_move_pages; 3236 os::Linux::numa_set_preferred_func_t os::Linux::_numa_set_preferred; 3237 os::Linux::NumaAllocationPolicy os::Linux::_current_numa_policy; 3238 unsigned long* os::Linux::_numa_all_nodes; 3239 struct bitmask* os::Linux::_numa_all_nodes_ptr; 3240 struct bitmask* os::Linux::_numa_nodes_ptr; 3241 struct bitmask* os::Linux::_numa_interleave_bitmask; 3242 struct bitmask* os::Linux::_numa_membind_bitmask; 3243 3244 bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) { 3245 uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE, 3246 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0); 3247 return res != (uintptr_t) MAP_FAILED; 3248 } 3249 3250 static address get_stack_commited_bottom(address bottom, size_t size) { 3251 address nbot = bottom; 3252 address ntop = bottom + size; 3253 3254 size_t page_sz = os::vm_page_size(); 3255 unsigned pages = size / page_sz; 3256 3257 unsigned char vec[1]; 3258 unsigned imin = 1, imax = pages + 1, imid; 3259 int mincore_return_value = 0; 3260 3261 assert(imin <= imax, "Unexpected page size"); 3262 3263 while (imin < imax) { 3264 imid = (imax + imin) / 2; 3265 nbot = ntop - (imid * page_sz); 3266 3267 // Use a trick with mincore to check whether the page is mapped or not. 3268 // mincore sets vec to 1 if the page resides in memory and to 0 if the page 3269 // is swapped out, but if the page we are asking for is unmapped 3270 // it returns -1 with errno set to ENOMEM. 3271 mincore_return_value = mincore(nbot, page_sz, vec); 3272 3273 if (mincore_return_value == -1) { 3274 // Page is not mapped: go up 3275 // to find the first mapped page 3276 if (errno != EAGAIN) { 3277 assert(errno == ENOMEM, "Unexpected mincore errno"); 3278 imax = imid; 3279 } 3280 } else { 3281 // Page is mapped: go down 3282 // to find the first unmapped page 3283 imin = imid + 1; 3284 } 3285 } 3286 3287 nbot = nbot + page_sz; 3288 3289 // Adjust the stack bottom one page up if the last checked page is not mapped 3290 if (mincore_return_value == -1) { 3291 nbot = nbot + page_sz; 3292 } 3293 3294 return nbot; 3295 } 3296 3297 bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) { 3298 int mincore_return_value; 3299 const size_t stripe = 1024; // query this many pages each time 3300 unsigned char vec[stripe + 1]; 3301 // set a guard 3302 vec[stripe] = 'X'; 3303 3304 const size_t page_sz = os::vm_page_size(); 3305 size_t pages = size / page_sz; 3306 3307 assert(is_aligned(start, page_sz), "Start address must be page aligned"); 3308 assert(is_aligned(size, page_sz), "Size must be page aligned"); 3309 3310 committed_start = NULL; 3311 3312 int loops = (pages + stripe - 1) / stripe; 3313 int committed_pages = 0; 3314 address loop_base = start; 3315 bool found_range = false; 3316 3317 for (int index = 0; index < loops && !found_range; index ++) { 3318 assert(pages > 0, "Nothing to do"); 3319 int pages_to_query = (pages >= stripe) ? stripe : pages; 3320 pages -= pages_to_query; 3321 3322 // Get a stable read 3323 while ((mincore_return_value = mincore(loop_base, pages_to_query * page_sz, vec)) == -1 && errno == EAGAIN); 3324 3325 // During shutdown, some memory goes away without properly notifying NMT, 3326 // e.g. ConcurrentGCThread/WatcherThread can exit without deleting the thread object. 3327 // Bail out and return as not committed for now.
3328 if (mincore_return_value == -1 && errno == ENOMEM) { 3329 return false; 3330 } 3331 3332 assert(vec[stripe] == 'X', "overflow guard"); 3333 assert(mincore_return_value == 0, "Range must be valid"); 3334 // Process this stripe 3335 for (int vecIdx = 0; vecIdx < pages_to_query; vecIdx ++) { 3336 if ((vec[vecIdx] & 0x01) == 0) { // not committed 3337 // End of the current contiguous region 3338 if (committed_start != NULL) { 3339 found_range = true; 3340 break; 3341 } 3342 } else { // committed 3343 // Start of a region 3344 if (committed_start == NULL) { 3345 committed_start = loop_base + page_sz * vecIdx; 3346 } 3347 committed_pages ++; 3348 } 3349 } 3350 3351 loop_base += pages_to_query * page_sz; 3352 } 3353 3354 if (committed_start != NULL) { 3355 assert(committed_pages > 0, "Must have committed region"); 3356 assert(committed_pages <= int(size / page_sz), "Can not commit more than it has"); 3357 assert(committed_start >= start && committed_start < start + size, "Out of range"); 3358 committed_size = page_sz * committed_pages; 3359 return true; 3360 } else { 3361 assert(committed_pages == 0, "Should not have committed region"); 3362 return false; 3363 } 3364 } 3365 3366 3367 // Linux uses a growable mapping for the stack, and if the mapping for 3368 // the stack guard pages is not removed when we detach a thread the 3369 // stack cannot grow beyond the pages where the stack guard was 3370 // mapped. If at some point later in the process the stack expands to 3371 // that point, the Linux kernel cannot expand the stack any further 3372 // because the guard pages are in the way, and a segfault occurs. 3373 // 3374 // However, it's essential not to split the stack region by unmapping 3375 // a region (leaving a hole) that's already part of the stack mapping, 3376 // so if the stack mapping has already grown beyond the guard pages at 3377 // the time we create them, we have to truncate the stack mapping. 3378 // So, we need to know the extent of the stack mapping when 3379 // create_stack_guard_pages() is called. 3380 3381 // We only need this for stacks that are growable: at the time of 3382 // writing thread stacks don't use growable mappings (i.e. those 3383 // created with MAP_GROWSDOWN), and aren't marked "[stack]", so this 3384 // only applies to the main thread. 3385 3386 // If the (growable) stack mapping already extends beyond the point 3387 // where we're going to put our guard pages, truncate the mapping at 3388 // that point by munmap()ping it. This ensures that when we later 3389 // munmap() the guard pages we don't leave a hole in the stack 3390 // mapping. This only affects the main/primordial thread. 3391 3392 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 3393 if (os::is_primordial_thread()) { 3394 // As we manually grow the stack up to the bottom inside create_attached_thread(), 3395 // it's likely that os::Linux::initial_thread_stack_bottom is mapped and 3396 // we don't need to do anything special. 3397 // Check it first, before calling the heavy function.
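// As a standalone illustration of the mincore() probe used here and in
// get_stack_commited_bottom() above (a hypothetical helper, not VM code,
// kept out of the build):
#if 0
#include <errno.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

// Returns 1 if the page containing 'p' is mapped, 0 if it is unmapped,
// and -1 on an unexpected error.
static int page_is_mapped(const void* p) {
  const long page = sysconf(_SC_PAGESIZE);
  void* base = (void*)((uintptr_t)p & ~(uintptr_t)(page - 1));
  unsigned char vec;
  if (mincore(base, (size_t)page, &vec) == 0) {
    return 1;  // mapped; (vec & 1) additionally tells whether it is resident
  }
  return (errno == ENOMEM) ? 0 : -1;  // ENOMEM => an unmapped page in the range
}
#endif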
3398 uintptr_t stack_extent = (uintptr_t) os::Linux::initial_thread_stack_bottom(); 3399 unsigned char vec[1]; 3400 3401 if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) { 3402 // Fallback to slow path on all errors, including EAGAIN 3403 assert((uintptr_t)addr >= stack_extent, 3404 "Sanity: addr should be larger than extent, " PTR_FORMAT " >= " PTR_FORMAT, 3405 p2i(addr), stack_extent); 3406 stack_extent = (uintptr_t) get_stack_commited_bottom( 3407 os::Linux::initial_thread_stack_bottom(), 3408 (size_t)addr - stack_extent); 3409 } 3410 3411 if (stack_extent < (uintptr_t)addr) { 3412 ::munmap((void*)stack_extent, (uintptr_t)(addr - stack_extent)); 3413 } 3414 } 3415 3416 return os::commit_memory(addr, size, !ExecMem); 3417 } 3418 3419 // If this is a growable mapping, remove the guard pages entirely by 3420 // munmap()ping them. If not, just call uncommit_memory(). This only 3421 // affects the main/primordial thread, but guard against future OS changes. 3422 // It's safe to always unmap guard pages for primordial thread because we 3423 // always place it right after end of the mapped region. 3424 3425 bool os::remove_stack_guard_pages(char* addr, size_t size) { 3426 uintptr_t stack_extent, stack_base; 3427 3428 if (os::is_primordial_thread()) { 3429 return ::munmap(addr, size) == 0; 3430 } 3431 3432 return os::uncommit_memory(addr, size); 3433 } 3434 3435 // 'requested_addr' is only treated as a hint, the return value may or 3436 // may not start from the requested address. Unlike Linux mmap(), this 3437 // function returns NULL to indicate failure. 3438 static char* anon_mmap(char* requested_addr, size_t bytes) { 3439 // MAP_FIXED is intentionally left out, to leave existing mappings intact. 3440 const int flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS; 3441 3442 // Map reserved/uncommitted pages PROT_NONE so we fail early if we 3443 // touch an uncommitted page. Otherwise, the read/write might 3444 // succeed if we have enough swap space to back the physical page. 3445 char* addr = (char*)::mmap(requested_addr, bytes, PROT_NONE, flags, -1, 0); 3446 3447 return addr == MAP_FAILED ? NULL : addr; 3448 } 3449 3450 // Allocate (using mmap, NO_RESERVE, with small pages) at either a given request address 3451 // (req_addr != NULL) or with a given alignment. 3452 // - bytes shall be a multiple of alignment. 3453 // - req_addr can be NULL. If not NULL, it must be a multiple of alignment. 3454 // - alignment sets the alignment at which memory shall be allocated. 3455 // It must be a multiple of allocation granularity. 3456 // Returns address of memory or NULL. If req_addr was not NULL, will only return 3457 // req_addr or NULL. 
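// The alignment case below over-reserves by 'alignment' bytes and then
// munmap()s the unaligned head and tail. A minimal standalone sketch of that
// technique (hypothetical, not VM code, kept out of the build; assumes
// 'alignment' is a power of two):
#if 0
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

static void* reserve_aligned(size_t bytes, size_t alignment) {
  const size_t extra = bytes + alignment;
  char* raw = (char*)mmap(NULL, extra, PROT_NONE,
                          MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS, -1, 0);
  if (raw == MAP_FAILED) {
    return NULL;
  }
  char* aligned = (char*)(((uintptr_t)raw + alignment - 1) &
                          ~(uintptr_t)(alignment - 1));
  if (aligned != raw) {
    munmap(raw, aligned - raw);                                 // trim the head
  }
  if (aligned + bytes != raw + extra) {
    munmap(aligned + bytes, (raw + extra) - (aligned + bytes)); // trim the tail
  }
  return aligned;
}
#endif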
3458 static char* anon_mmap_aligned(char* req_addr, size_t bytes, size_t alignment) { 3459 size_t extra_size = bytes; 3460 if (req_addr == NULL && alignment > 0) { 3461 extra_size += alignment; 3462 } 3463 3464 char* start = anon_mmap(req_addr, extra_size); 3465 if (start != NULL) { 3466 if (req_addr != NULL) { 3467 if (start != req_addr) { 3468 ::munmap(start, extra_size); 3469 start = NULL; 3470 } 3471 } else { 3472 char* const start_aligned = align_up(start, alignment); 3473 char* const end_aligned = start_aligned + bytes; 3474 char* const end = start + extra_size; 3475 if (start_aligned > start) { 3476 ::munmap(start, start_aligned - start); 3477 } 3478 if (end_aligned < end) { 3479 ::munmap(end_aligned, end - end_aligned); 3480 } 3481 start = start_aligned; 3482 } 3483 } 3484 return start; 3485 } 3486 3487 static int anon_munmap(char * addr, size_t size) { 3488 return ::munmap(addr, size) == 0; 3489 } 3490 3491 char* os::pd_reserve_memory(size_t bytes, bool exec) { 3492 return anon_mmap(NULL, bytes); 3493 } 3494 3495 bool os::pd_release_memory(char* addr, size_t size) { 3496 return anon_munmap(addr, size); 3497 } 3498 3499 #ifdef CAN_SHOW_REGISTERS_ON_ASSERT 3500 extern char* g_assert_poison; // assertion poison page address 3501 #endif 3502 3503 static bool linux_mprotect(char* addr, size_t size, int prot) { 3504 // Linux wants the mprotect address argument to be page aligned. 3505 char* bottom = (char*)align_down((intptr_t)addr, os::Linux::page_size()); 3506 3507 // According to SUSv3, mprotect() should only be used with mappings 3508 // established by mmap(), and mmap() always maps whole pages. Unaligned 3509 // 'addr' likely indicates problem in the VM (e.g. trying to change 3510 // protection of malloc'ed or statically allocated memory). Check the 3511 // caller if you hit this assert. 3512 assert(addr == bottom, "sanity check"); 3513 3514 size = align_up(pointer_delta(addr, bottom, 1) + size, os::Linux::page_size()); 3515 // Don't log anything if we're executing in the poison page signal handling 3516 // context. It can lead to reentrant use of other parts of the VM code. 3517 #ifdef CAN_SHOW_REGISTERS_ON_ASSERT 3518 if (addr != g_assert_poison) 3519 #endif 3520 Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(bottom), p2i(bottom+size), prot); 3521 return ::mprotect(bottom, size, prot) == 0; 3522 } 3523 3524 // Set protections specified 3525 bool os::protect_memory(char* addr, size_t bytes, ProtType prot, 3526 bool is_committed) { 3527 unsigned int p = 0; 3528 switch (prot) { 3529 case MEM_PROT_NONE: p = PROT_NONE; break; 3530 case MEM_PROT_READ: p = PROT_READ; break; 3531 case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break; 3532 case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break; 3533 default: 3534 ShouldNotReachHere(); 3535 } 3536 // is_committed is unused. 
3537 return linux_mprotect(addr, bytes, p); 3538 } 3539 3540 bool os::guard_memory(char* addr, size_t size) { 3541 return linux_mprotect(addr, size, PROT_NONE); 3542 } 3543 3544 bool os::unguard_memory(char* addr, size_t size) { 3545 return linux_mprotect(addr, size, PROT_READ|PROT_WRITE); 3546 } 3547 3548 bool os::Linux::transparent_huge_pages_sanity_check(bool warn, 3549 size_t page_size) { 3550 bool result = false; 3551 void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE, 3552 MAP_ANONYMOUS|MAP_PRIVATE, 3553 -1, 0); 3554 if (p != MAP_FAILED) { 3555 void *aligned_p = align_up(p, page_size); 3556 3557 result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0; 3558 3559 munmap(p, page_size * 2); 3560 } 3561 3562 if (warn && !result) { 3563 warning("TransparentHugePages is not supported by the operating system."); 3564 } 3565 3566 return result; 3567 } 3568 3569 int os::Linux::hugetlbfs_page_size_flag(size_t page_size) { 3570 if (page_size != default_large_page_size()) { 3571 return (exact_log2(page_size) << MAP_HUGE_SHIFT); 3572 } 3573 return 0; 3574 } 3575 3576 bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) { 3577 // Include the page size flag to ensure we sanity check the correct page size. 3578 int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size); 3579 void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE, flags, -1, 0); 3580 3581 if (p != MAP_FAILED) { 3582 // Mapping succeeded, sanity check passed. 3583 munmap(p, page_size); 3584 return true; 3585 } else { 3586 log_info(pagesize)("Large page size (" SIZE_FORMAT "%s) failed sanity check, " 3587 "checking if smaller large page sizes are usable", 3588 byte_size_in_exact_unit(page_size), 3589 exact_unit_for_byte_size(page_size)); 3590 for (size_t page_size_ = _page_sizes.next_smaller(page_size); 3591 page_size_ != (size_t)os::vm_page_size(); 3592 page_size_ = _page_sizes.next_smaller(page_size_)) { 3593 flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size_); 3594 p = mmap(NULL, page_size_, PROT_READ|PROT_WRITE, flags, -1, 0); 3595 if (p != MAP_FAILED) { 3596 // Mapping succeeded, sanity check passed. 3597 munmap(p, page_size_); 3598 log_info(pagesize)("Large page size (" SIZE_FORMAT "%s) passed sanity check", 3599 byte_size_in_exact_unit(page_size_), 3600 exact_unit_for_byte_size(page_size_)); 3601 return true; 3602 } 3603 } 3604 } 3605 3606 if (warn) { 3607 warning("HugeTLBFS is not configured or not supported by the operating system."); 3608 } 3609 3610 return false; 3611 } 3612 3613 bool os::Linux::shm_hugetlbfs_sanity_check(bool warn, size_t page_size) { 3614 // Try to create a large shared memory segment. 3615 int shmid = shmget(IPC_PRIVATE, page_size, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W); 3616 if (shmid == -1) { 3617 // Possible reasons for shmget failure: 3618 // 1. shmmax is too small for the request. 3619 // > check shmmax value: cat /proc/sys/kernel/shmmax 3620 // > increase shmmax value: echo "new_value" > /proc/sys/kernel/shmmax 3621 // 2. not enough large page memory. 3622 // > check available large pages: cat /proc/meminfo 3623 // > increase amount of large pages: 3624 // sysctl -w vm.nr_hugepages=new_value 3625 // > For more information regarding large pages please refer to: 3626 // https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt 3627 if (warn) { 3628 warning("Large pages using UseSHM are not configured on this system."); 3629 } 3630 return false; 3631 } 3632 // Managed to create a segment, now delete it. 
shmctl(shmid, IPC_RMID, NULL); 3634 return true; 3635 } 3636 3637 // From the coredump_filter documentation: 3638 // 3639 // - (bit 0) anonymous private memory 3640 // - (bit 1) anonymous shared memory 3641 // - (bit 2) file-backed private memory 3642 // - (bit 3) file-backed shared memory 3643 // - (bit 4) ELF header pages in file-backed private memory areas (it is 3644 // effective only if bit 2 is cleared) 3645 // - (bit 5) hugetlb private memory 3646 // - (bit 6) hugetlb shared memory 3647 // - (bit 7) dax private memory 3648 // - (bit 8) dax shared memory 3649 // 3650 static void set_coredump_filter(CoredumpFilterBit bit) { 3651 FILE *f; 3652 long cdm; 3653 3654 if ((f = fopen("/proc/self/coredump_filter", "r+")) == NULL) { 3655 return; 3656 } 3657 3658 if (fscanf(f, "%lx", &cdm) != 1) { 3659 fclose(f); 3660 return; 3661 } 3662 3663 long saved_cdm = cdm; 3664 rewind(f); 3665 cdm |= bit; 3666 3667 if (cdm != saved_cdm) { 3668 fprintf(f, "%#lx", cdm); 3669 } 3670 3671 fclose(f); 3672 } 3673 3674 // Large page support 3675 3676 static size_t _large_page_size = 0; 3677 3678 static size_t scan_default_large_page_size() { 3679 size_t default_large_page_size = 0; 3680 3681 // large_page_size on Linux is used to round up heap size. x86 uses either 3682 // 2M or 4M pages, depending on whether PAE (Physical Address Extensions) 3683 // mode is enabled. AMD64/EM64T uses 2M pages in 64-bit mode. IA64 can use 3684 // pages as large as 1G. 3685 // 3686 // Here we try to figure out the page size by parsing /proc/meminfo and looking 3687 // for a line with the following format: 3688 // Hugepagesize: 2048 kB 3689 // 3690 // If we can't determine the value (e.g. /proc is not mounted, or the text 3691 // format has been changed), we'll set the default large page size to 0. 3692 3693 FILE *fp = fopen("/proc/meminfo", "r"); 3694 if (fp) { 3695 while (!feof(fp)) { 3696 int x = 0; 3697 char buf[16]; 3698 if (fscanf(fp, "Hugepagesize: %d", &x) == 1) { 3699 if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) { 3700 default_large_page_size = x * K; 3701 break; 3702 } 3703 } else { 3704 // skip to the next line 3705 for (;;) { 3706 int ch = fgetc(fp); 3707 if (ch == EOF || ch == (int)'\n') break; 3708 } 3709 } 3710 } 3711 fclose(fp); 3712 } 3713 3714 return default_large_page_size; 3715 } 3716 3717 static os::PageSizes scan_multiple_page_support() { 3718 // Scan /sys/kernel/mm/hugepages 3719 // to discover the available page sizes 3720 const char* sys_hugepages = "/sys/kernel/mm/hugepages"; 3721 os::PageSizes page_sizes; 3722 3723 DIR *dir = opendir(sys_hugepages); if (dir == NULL) { return page_sizes; } // guard against readdir(NULL) if the directory is missing 3724 3725 struct dirent *entry; 3726 size_t page_size; 3727 while ((entry = readdir(dir)) != NULL) { 3728 if (entry->d_type == DT_DIR && 3729 sscanf(entry->d_name, "hugepages-%zukB", &page_size) == 1) { 3730 // The kernel is using kB, hotspot uses bytes 3731 // Add each found Large Page Size to page_sizes 3732 page_sizes.add(page_size * K); 3733 } 3734 } 3735 closedir(dir); 3736 3737 LogTarget(Debug, pagesize) lt; 3738 if (lt.is_enabled()) { 3739 LogStream ls(lt); 3740 ls.print("Large Page sizes: "); 3741 page_sizes.print_on(&ls); 3742 } 3743 3744 return page_sizes; 3745 } 3746 3747 size_t os::Linux::default_large_page_size() { 3748 return _default_large_page_size; 3749 } 3750 3751 void warn_no_large_pages_configured() { 3752 if (!FLAG_IS_DEFAULT(UseLargePages)) { 3753 log_warning(pagesize)("UseLargePages disabled, no large pages configured and available on the system."); 3754 } 3755 } 3756 3757 bool os::Linux::setup_large_page_type(size_t page_size)
{ 3758 if (FLAG_IS_DEFAULT(UseHugeTLBFS) && 3759 FLAG_IS_DEFAULT(UseSHM) && 3760 FLAG_IS_DEFAULT(UseTransparentHugePages)) { 3761 3762 // The type of large pages has not been specified by the user. 3763 3764 // Try UseHugeTLBFS and then UseSHM. 3765 UseHugeTLBFS = UseSHM = true; 3766 3767 // Don't try UseTransparentHugePages since there are known 3768 // performance issues with it turned on. This might change in the future. 3769 UseTransparentHugePages = false; 3770 } 3771 3772 if (UseTransparentHugePages) { 3773 bool warn_on_failure = !FLAG_IS_DEFAULT(UseTransparentHugePages); 3774 if (transparent_huge_pages_sanity_check(warn_on_failure, page_size)) { 3775 UseHugeTLBFS = false; 3776 UseSHM = false; 3777 return true; 3778 } 3779 UseTransparentHugePages = false; 3780 } 3781 3782 if (UseHugeTLBFS) { 3783 bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS); 3784 if (hugetlbfs_sanity_check(warn_on_failure, page_size)) { 3785 UseSHM = false; 3786 return true; 3787 } 3788 UseHugeTLBFS = false; 3789 } 3790 3791 if (UseSHM) { 3792 bool warn_on_failure = !FLAG_IS_DEFAULT(UseSHM); 3793 if (shm_hugetlbfs_sanity_check(warn_on_failure, page_size)) { 3794 return true; 3795 } 3796 UseSHM = false; 3797 } 3798 3799 warn_no_large_pages_configured(); 3800 return false; 3801 } 3802 3803 void os::large_page_init() { 3804 // 1) Handle the case where we do not want to use huge pages and hence 3805 // there is no need to scan the OS for related info 3806 if (!UseLargePages && 3807 !UseTransparentHugePages && 3808 !UseHugeTLBFS && 3809 !UseSHM) { 3810 // Not using large pages. 3811 return; 3812 } 3813 3814 if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) { 3815 // The user explicitly turned off large pages. 3816 // Ignore the rest of the large pages flags. 3817 UseTransparentHugePages = false; 3818 UseHugeTLBFS = false; 3819 UseSHM = false; 3820 return; 3821 } 3822 3823 // 2) Scan OS info 3824 size_t default_large_page_size = scan_default_large_page_size(); 3825 os::Linux::_default_large_page_size = default_large_page_size; 3826 if (default_large_page_size == 0) { 3827 // No large pages configured, return. 3828 warn_no_large_pages_configured(); 3829 UseLargePages = false; 3830 UseTransparentHugePages = false; 3831 UseHugeTLBFS = false; 3832 UseSHM = false; 3833 return; 3834 } 3835 os::PageSizes all_large_pages = scan_multiple_page_support(); 3836 3837 // 3) Consistency check and post-processing 3838 3839 // It is unclear if /sys/kernel/mm/hugepages/ and /proc/meminfo could disagree. Manually 3840 // re-add the default page size to the list of page sizes to be sure. 3841 all_large_pages.add(default_large_page_size); 3842 3843 // Check LargePageSizeInBytes matches an available page size and if so set _large_page_size 3844 // using LargePageSizeInBytes as the maximum allowed large page size. If LargePageSizeInBytes 3845 // doesn't match an available page size set _large_page_size to default_large_page_size 3846 // and use it as the maximum. 
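// For example (hypothetical values): with a 2M default large page size and
// configured sizes {2M, 1G}, -XX:LargePageSizeInBytes=1G selects 1G as the
// maximum, while -XX:LargePageSizeInBytes=4M matches no configured size and
// falls back to the 2M default.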
3847 if (FLAG_IS_DEFAULT(LargePageSizeInBytes) || 3848 LargePageSizeInBytes == 0 || 3849 LargePageSizeInBytes == default_large_page_size) { 3850 _large_page_size = default_large_page_size; 3851 log_info(pagesize)("Using the default large page size: " SIZE_FORMAT "%s", 3852 byte_size_in_exact_unit(_large_page_size), 3853 exact_unit_for_byte_size(_large_page_size)); 3854 } else { 3855 if (all_large_pages.contains(LargePageSizeInBytes)) { 3856 _large_page_size = LargePageSizeInBytes; 3857 log_info(pagesize)("Overriding default large page size (" SIZE_FORMAT "%s) " 3858 "using LargePageSizeInBytes: " SIZE_FORMAT "%s", 3859 byte_size_in_exact_unit(default_large_page_size), 3860 exact_unit_for_byte_size(default_large_page_size), 3861 byte_size_in_exact_unit(_large_page_size), 3862 exact_unit_for_byte_size(_large_page_size)); 3863 } else { 3864 _large_page_size = default_large_page_size; 3865 log_info(pagesize)("LargePageSizeInBytes is not a valid large page size (" SIZE_FORMAT "%s) " 3866 "using the default large page size: " SIZE_FORMAT "%s", 3867 byte_size_in_exact_unit(LargePageSizeInBytes), 3868 exact_unit_for_byte_size(LargePageSizeInBytes), 3869 byte_size_in_exact_unit(_large_page_size), 3870 exact_unit_for_byte_size(_large_page_size)); 3871 } 3872 } 3873 3874 // Populate _page_sizes with large page sizes less than or equal to 3875 // _large_page_size. 3876 for (size_t page_size = _large_page_size; page_size != 0; 3877 page_size = all_large_pages.next_smaller(page_size)) { 3878 _page_sizes.add(page_size); 3879 } 3880 3881 LogTarget(Info, pagesize) lt; 3882 if (lt.is_enabled()) { 3883 LogStream ls(lt); 3884 ls.print("Usable page sizes: "); 3885 _page_sizes.print_on(&ls); 3886 } 3887 3888 // Now determine the type of large pages to use: 3889 UseLargePages = os::Linux::setup_large_page_type(_large_page_size); 3890 3891 set_coredump_filter(LARGEPAGES_BIT); 3892 } 3893 3894 #ifndef SHM_HUGETLB 3895 #define SHM_HUGETLB 04000 3896 #endif 3897 3898 #define shm_warning_format(format, ...) \ 3899 do { \ 3900 if (UseLargePages && \ 3901 (!FLAG_IS_DEFAULT(UseLargePages) || \ 3902 !FLAG_IS_DEFAULT(UseSHM) || \ 3903 !FLAG_IS_DEFAULT(LargePageSizeInBytes))) { \ 3904 warning(format, __VA_ARGS__); \ 3905 } \ 3906 } while (0) 3907 3908 #define shm_warning(str) shm_warning_format("%s", str) 3909 3910 #define shm_warning_with_errno(str) \ 3911 do { \ 3912 int err = errno; \ 3913 shm_warning_format(str " (error = %d)", err); \ 3914 } while (0) 3915 3916 static char* shmat_with_alignment(int shmid, size_t bytes, size_t alignment) { 3917 assert(is_aligned(bytes, alignment), "Must be divisible by the alignment"); 3918 3919 if (!is_aligned(alignment, SHMLBA)) { 3920 assert(false, "Code below assumes that alignment is at least SHMLBA aligned"); 3921 return NULL; 3922 } 3923 3924 // To ensure that we get 'alignment' aligned memory from shmat, 3925 // we pre-reserve aligned virtual memory and then attach to that. 3926 3927 char* pre_reserved_addr = anon_mmap_aligned(NULL /* req_addr */, bytes, alignment); 3928 if (pre_reserved_addr == NULL) { 3929 // Couldn't pre-reserve aligned memory. 3930 shm_warning("Failed to pre-reserve aligned memory for shmat."); 3931 return NULL; 3932 } 3933 3934 // SHM_REMAP is needed to allow shmat to map over an existing mapping. 
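// (Without SHM_REMAP, the shmat() below would fail with EINVAL, because the
// pre-reserved range at pre_reserved_addr is already mapped.)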
3935 char* addr = (char*)shmat(shmid, pre_reserved_addr, SHM_REMAP); 3936 3937 if ((intptr_t)addr == -1) { 3938 int err = errno; 3939 shm_warning_with_errno("Failed to attach shared memory."); 3940 3941 assert(err != EACCES, "Unexpected error"); 3942 assert(err != EIDRM, "Unexpected error"); 3943 assert(err != EINVAL, "Unexpected error"); 3944 3945 // Since we don't know if the kernel unmapped the pre-reserved memory area 3946 // we can't unmap it, since that would potentially unmap memory that was 3947 // mapped by other threads. 3948 return NULL; 3949 } 3950 3951 return addr; 3952 } 3953 3954 static char* shmat_at_address(int shmid, char* req_addr) { 3955 if (!is_aligned(req_addr, SHMLBA)) { 3956 assert(false, "Requested address needs to be SHMLBA aligned"); 3957 return NULL; 3958 } 3959 3960 char* addr = (char*)shmat(shmid, req_addr, 0); 3961 3962 if ((intptr_t)addr == -1) { 3963 shm_warning_with_errno("Failed to attach shared memory."); 3964 return NULL; 3965 } 3966 3967 return addr; 3968 } 3969 3970 static char* shmat_large_pages(int shmid, size_t bytes, size_t alignment, char* req_addr) { 3971 // If a req_addr has been provided, we assume that the caller has already aligned the address. 3972 if (req_addr != NULL) { 3973 assert(is_aligned(req_addr, os::large_page_size()), "Must be divisible by the large page size"); 3974 assert(is_aligned(req_addr, alignment), "Must be divisible by given alignment"); 3975 return shmat_at_address(shmid, req_addr); 3976 } 3977 3978 // Since shmid has been set up with SHM_HUGETLB, shmat will automatically 3979 // return large page size aligned memory addresses when req_addr == NULL. 3980 // However, if the alignment is larger than the large page size, we have 3981 // to manually ensure that the memory returned is 'alignment' aligned. 3982 if (alignment > os::large_page_size()) { 3983 assert(is_aligned(alignment, os::large_page_size()), "Must be divisible by the large page size"); 3984 return shmat_with_alignment(shmid, bytes, alignment); 3985 } else { 3986 return shmat_at_address(shmid, NULL); 3987 } 3988 } 3989 3990 char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment, 3991 char* req_addr, bool exec) { 3992 // "exec" is passed in but not used. Creating the shared image for 3993 // the code cache doesn't involve an SHM_X executable permission to check. 3994 assert(UseLargePages && UseSHM, "only for SHM large pages"); 3995 assert(is_aligned(req_addr, os::large_page_size()), "Unaligned address"); 3996 assert(is_aligned(req_addr, alignment), "Unaligned address"); 3997 3998 if (!is_aligned(bytes, os::large_page_size())) { 3999 return NULL; // Fallback to small pages. 4000 } 4001 4002 // Create a large shared memory region to attach to based on size. 4003 // Currently, size is the total size of the heap. 4004 int shmid = shmget(IPC_PRIVATE, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W); 4005 if (shmid == -1) { 4006 // Possible reasons for shmget failure: 4007 // 1. shmmax is too small for the request. 4008 // > check shmmax value: cat /proc/sys/kernel/shmmax 4009 // > increase shmmax value: echo "new_value" > /proc/sys/kernel/shmmax 4010 // 2. not enough large page memory. 4011 // > check available large pages: cat /proc/meminfo 4012 // > increase amount of large pages: 4013 // sysctl -w vm.nr_hugepages=new_value 4014 // > For more information regarding large pages please refer to: 4015 // https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt 4016 // Note 1: different Linux distributions may use different names for this property, 4017 // e.g.
on Redhat AS-3 it is "hugetlb_pool". 4018 // Note 2: it's possible that there's enough physical memory available but 4019 // it is so fragmented after a long run that it can't 4020 // be coalesced into large pages. Try to reserve large pages when 4021 // the system is still "fresh". 4022 shm_warning_with_errno("Failed to reserve shared memory."); 4023 return NULL; 4024 } 4025 4026 // Attach to the region. 4027 char* addr = shmat_large_pages(shmid, bytes, alignment, req_addr); 4028 4029 // Remove shmid. If shmat() is successful, the actual shared memory segment 4030 // will be deleted when it's detached by shmdt() or when the process 4031 // terminates. If shmat() is not successful this will remove the shared 4032 // segment immediately. 4033 shmctl(shmid, IPC_RMID, NULL); 4034 4035 return addr; 4036 } 4037 4038 static void warn_on_commit_special_failure(char* req_addr, size_t bytes, 4039 size_t page_size, int error) { 4040 assert(error == ENOMEM, "Only expect to fail if no memory is available"); 4041 4042 bool warn_on_failure = UseLargePages && 4043 (!FLAG_IS_DEFAULT(UseLargePages) || 4044 !FLAG_IS_DEFAULT(UseHugeTLBFS) || 4045 !FLAG_IS_DEFAULT(LargePageSizeInBytes)); 4046 4047 if (warn_on_failure) { 4048 char msg[128]; 4049 jio_snprintf(msg, sizeof(msg), "Failed to reserve and commit memory. req_addr: " 4050 PTR_FORMAT " bytes: " SIZE_FORMAT " page size: " 4051 SIZE_FORMAT " (errno = %d).", 4052 req_addr, bytes, page_size, error); 4053 warning("%s", msg); 4054 } 4055 } 4056 4057 bool os::Linux::commit_memory_special(size_t bytes, 4058 size_t page_size, 4059 char* req_addr, 4060 bool exec) { 4061 assert(UseLargePages && UseHugeTLBFS, "Should only get here when HugeTLBFS large pages are used"); 4062 assert(is_aligned(bytes, page_size), "Unaligned size"); 4063 assert(is_aligned(req_addr, page_size), "Unaligned address"); 4064 assert(req_addr != NULL, "Must have a requested address for special mappings"); 4065 4066 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; 4067 int flags = MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED; 4068 4069 // For large pages additional flags are required.
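// For example, for 1G pages when the default large page size is 2M,
// exact_log2(1G) is 30, so hugetlbfs_page_size_flag() contributes
// (30 << MAP_HUGE_SHIFT), the kernel's MAP_HUGE_1GB encoding; for the default
// page size it contributes 0 and the kernel uses its default hugetlb size.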
4070 if (page_size > (size_t) os::vm_page_size()) { 4071 flags |= MAP_HUGETLB | hugetlbfs_page_size_flag(page_size); 4072 } 4073 char* addr = (char*)::mmap(req_addr, bytes, prot, flags, -1, 0); 4074 4075 if (addr == MAP_FAILED) { 4076 warn_on_commit_special_failure(req_addr, bytes, page_size, errno); 4077 return false; 4078 } 4079 4080 log_debug(pagesize)("Commit special mapping: " PTR_FORMAT ", size=" SIZE_FORMAT "%s, page size=" 4081 SIZE_FORMAT "%s", 4082 p2i(addr), byte_size_in_exact_unit(bytes), 4083 exact_unit_for_byte_size(bytes), 4084 byte_size_in_exact_unit(page_size), 4085 exact_unit_for_byte_size(page_size)); 4086 assert(is_aligned(addr, page_size), "Must be"); 4087 return true; 4088 } 4089 4090 char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes, 4091 size_t alignment, 4092 size_t page_size, 4093 char* req_addr, 4094 bool exec) { 4095 assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages"); 4096 assert(is_aligned(req_addr, alignment), "Must be"); 4097 assert(is_aligned(req_addr, page_size), "Must be"); 4098 assert(is_aligned(alignment, os::vm_allocation_granularity()), "Must be"); 4099 assert(_page_sizes.contains(page_size), "Must be a valid page size"); 4100 assert(page_size > (size_t)os::vm_page_size(), "Must be a large page size"); 4101 assert(bytes >= page_size, "Shouldn't allocate large pages for small sizes"); 4102 4103 // We only end up here when at least 1 large page can be used. 4104 // If the size is not a multiple of the large page size, we 4105 // will mix the type of pages used, but in a descending order. 4106 // Start off by reserving a range of the given size that is 4107 // properly aligned. At this point no pages are committed. If 4108 // a requested address is given it will be used and it must be 4109 // aligned to both the large page size and the given alignment. 4110 // The larger of the two will be used. 4111 size_t required_alignment = MAX(page_size, alignment); 4112 char* const aligned_start = anon_mmap_aligned(req_addr, bytes, required_alignment); 4113 if (aligned_start == NULL) { 4114 return NULL; 4115 } 4116 4117 // First commit using large pages. 4118 size_t large_bytes = align_down(bytes, page_size); 4119 bool large_committed = commit_memory_special(large_bytes, page_size, aligned_start, exec); 4120 4121 if (large_committed && bytes == large_bytes) { 4122 // The entire range was committed using large pages, so 4123 // no additional work is needed. 4124 return aligned_start; 4125 } 4126 4127 // The requested size requires some small pages as well. 4128 char* small_start = aligned_start + large_bytes; 4129 size_t small_size = bytes - large_bytes; 4130 if (!large_committed) { 4131 // Failed to commit large pages, so we need to unmap the 4132 // remainder of the original reservation. 4133 ::munmap(small_start, small_size); 4134 return NULL; 4135 } 4136 4137 // Commit the remaining bytes using small pages. 4138 bool small_committed = commit_memory_special(small_size, os::vm_page_size(), small_start, exec); 4139 if (!small_committed) { 4140 // Failed to commit the remaining size, need to unmap 4141 // the large pages part of the reservation. 4142 ::munmap(aligned_start, large_bytes); 4143 return NULL; 4144 } 4145 return aligned_start; 4146 } 4147 4148 char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, 4149 char* req_addr, bool exec) { 4150 assert(UseLargePages, "only for large pages"); 4151 4152 char* addr; 4153 if (UseSHM) { 4154 // No support for using specific page sizes with SHM.
4155 addr = os::Linux::reserve_memory_special_shm(bytes, alignment, req_addr, exec); 4156 } else { 4157 assert(UseHugeTLBFS, "must be"); 4158 addr = os::Linux::reserve_memory_special_huge_tlbfs(bytes, alignment, page_size, req_addr, exec); 4159 } 4160 4161 if (addr != NULL) { 4162 if (UseNUMAInterleaving) { 4163 numa_make_global(addr, bytes); 4164 } 4165 } 4166 4167 return addr; 4168 } 4169 4170 bool os::Linux::release_memory_special_shm(char* base, size_t bytes) { 4171 // detaching the SHM segment will also delete it, see reserve_memory_special_shm() 4172 return shmdt(base) == 0; 4173 } 4174 4175 bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) { 4176 return pd_release_memory(base, bytes); 4177 } 4178 4179 bool os::pd_release_memory_special(char* base, size_t bytes) { 4180 assert(UseLargePages, "only for large pages"); 4181 bool res; 4182 4183 if (UseSHM) { 4184 res = os::Linux::release_memory_special_shm(base, bytes); 4185 } else { 4186 assert(UseHugeTLBFS, "must be"); 4187 res = os::Linux::release_memory_special_huge_tlbfs(base, bytes); 4188 } 4189 return res; 4190 } 4191 4192 size_t os::large_page_size() { 4193 return _large_page_size; 4194 } 4195 4196 // With SysV SHM the entire memory region must be allocated as shared 4197 // memory. 4198 // HugeTLBFS allows the application to commit large page memory on demand. 4199 // However, when committing memory with HugeTLBFS fails, the region 4200 // that was supposed to be committed will lose the old reservation 4201 // and allow other threads to steal that memory region. Because of this 4202 // behavior we can't commit HugeTLBFS memory. 4203 bool os::can_commit_large_page_memory() { 4204 return UseTransparentHugePages; 4205 } 4206 4207 bool os::can_execute_large_page_memory() { 4208 return UseTransparentHugePages || UseHugeTLBFS; 4209 } 4210 4211 char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) { 4212 assert(file_desc >= 0, "file_desc is not valid"); 4213 char* result = pd_attempt_reserve_memory_at(requested_addr, bytes, !ExecMem); 4214 if (result != NULL) { 4215 if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) { 4216 vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory")); 4217 } 4218 } 4219 return result; 4220 } 4221 4222 // Reserve memory at an arbitrary address, only if that area is 4223 // available (and not reserved for something else). 4224 4225 char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec) { 4226 // Assert only that the size is a multiple of the page size, since 4227 // that's all that mmap requires, and since that's all we really know 4228 // about at this low abstraction level. If we need higher alignment, 4229 // we can either pass an alignment to this method or verify alignment 4230 // in one of the methods further up the call chain. See bug 5044738. 4231 assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block"); 4232 4233 // Try to allocate a block at the requested address only; if it lands 4234 // in the wrong spot, fail rather than retry. 4235 4236 // Linux mmap allows the caller to pass an address as a hint; give it a try first, 4237 // if the kernel honors the hint then we can return immediately.
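// For example (hypothetical addresses): a request for 0x7f0000000000 may be
// honored exactly, in which case we return it; if that range is occupied, the
// kernel (no MAP_FIXED is passed) picks some other address, and the code below
// unmaps that block and returns NULL rather than an address the caller did not
// ask for.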
4238 char * addr = anon_mmap(requested_addr, bytes); 4239 if (addr == requested_addr) { 4240 return requested_addr; 4241 } 4242 4243 if (addr != NULL) { 4244 // mmap() is successful but it fails to reserve at the requested address 4245 anon_munmap(addr, bytes); 4246 } 4247 4248 return NULL; 4249 } 4250 4251 // Used to convert frequent JVM_Yield() to nops 4252 bool os::dont_yield() { 4253 return DontYieldALot; 4254 } 4255 4256 // Linux CFS scheduler (since 2.6.23) does not guarantee sched_yield(2) will 4257 // actually give up the CPU. Since skip buddy (v2.6.28): 4258 // 4259 // * Sets the yielding task as skip buddy for current CPU's run queue. 4260 // * Picks next from run queue, if empty, picks a skip buddy (can be the yielding task). 4261 // * Clears skip buddies for this run queue (yielding task no longer a skip buddy). 4262 // 4263 // An alternative is calling os::naked_short_nanosleep with a small number to avoid 4264 // getting re-scheduled immediately. 4265 // 4266 void os::naked_yield() { 4267 sched_yield(); 4268 } 4269 4270 //////////////////////////////////////////////////////////////////////////////// 4271 // thread priority support 4272 4273 // Note: Normal Linux applications are run with SCHED_OTHER policy. SCHED_OTHER 4274 // only supports dynamic priority, static priority must be zero. For real-time 4275 // applications, Linux supports SCHED_RR which allows static priority (1-99). 4276 // However, for large multi-threaded applications, SCHED_RR is not only slower 4277 // than SCHED_OTHER, but also very unstable (my volano tests hang hard 4 out 4278 // of 5 runs - Sep 2005). 4279 // 4280 // The following code actually changes the niceness of kernel-thread/LWP. It 4281 // has an assumption that setpriority() only modifies one kernel-thread/LWP, 4282 // not the entire user process, and user level threads are 1:1 mapped to kernel 4283 // threads. It has always been the case, but could change in the future. For 4284 // this reason, the code should not be used as default (ThreadPriorityPolicy=0). 4285 // It is only used when ThreadPriorityPolicy=1 and may require system level permission 4286 // (e.g., root privilege or CAP_SYS_NICE capability). 4287 4288 int os::java_to_os_priority[CriticalPriority + 1] = { 4289 19, // 0 Entry should never be used 4290 4291 4, // 1 MinPriority 4292 3, // 2 4293 2, // 3 4294 4295 1, // 4 4296 0, // 5 NormPriority 4297 -1, // 6 4298 4299 -2, // 7 4300 -3, // 8 4301 -4, // 9 NearMaxPriority 4302 4303 -5, // 10 MaxPriority 4304 4305 -5 // 11 CriticalPriority 4306 }; 4307 4308 static int prio_init() { 4309 if (ThreadPriorityPolicy == 1) { 4310 if (geteuid() != 0) { 4311 if (!FLAG_IS_DEFAULT(ThreadPriorityPolicy) && !FLAG_IS_JIMAGE_RESOURCE(ThreadPriorityPolicy)) { 4312 warning("-XX:ThreadPriorityPolicy=1 may require system level permission, " \ 4313 "e.g., being the root user. If the necessary permission is not " \ 4314 "possessed, changes to priority will be silently ignored."); 4315 } 4316 } 4317 } 4318 if (UseCriticalJavaThreadPriority) { 4319 os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority]; 4320 } 4321 return 0; 4322 } 4323 4324 OSReturn os::set_native_priority(Thread* thread, int newpri) { 4325 if (!UseThreadPriorities || ThreadPriorityPolicy == 0) return OS_OK; 4326 4327 int ret = setpriority(PRIO_PROCESS, thread->osthread()->thread_id(), newpri); 4328 return (ret == 0) ? 
OS_OK : OS_ERR; 4329 } 4330 4331 OSReturn os::get_native_priority(const Thread* const thread, 4332 int *priority_ptr) { 4333 if (!UseThreadPriorities || ThreadPriorityPolicy == 0) { 4334 *priority_ptr = java_to_os_priority[NormPriority]; 4335 return OS_OK; 4336 } 4337 4338 errno = 0; 4339 *priority_ptr = getpriority(PRIO_PROCESS, thread->osthread()->thread_id()); 4340 return (*priority_ptr != -1 || errno == 0 ? OS_OK : OS_ERR); 4341 } 4342 4343 // This is the fastest way to get thread cpu time on Linux. 4344 // Returns cpu time (user+sys) for any thread, not only for the current one. 4345 // POSIX compliant clocks are implemented in kernels 2.6.16+. 4346 // It might work on 2.6.10+ with a special kernel/glibc patch. 4347 // For reference, please see IEEE Std 1003.1-2004: 4348 // http://www.unix.org/single_unix_specification 4349 4350 jlong os::Linux::fast_thread_cpu_time(clockid_t clockid) { 4351 struct timespec tp; 4352 int status = clock_gettime(clockid, &tp); 4353 assert(status == 0, "clock_gettime error: %s", os::strerror(errno)); 4354 return (tp.tv_sec * NANOSECS_PER_SEC) + tp.tv_nsec; 4355 } 4356 4357 // Determine if the vmid is the parent pid for a child in a PID namespace. 4358 // Return the namespace pid if so, otherwise -1. 4359 int os::Linux::get_namespace_pid(int vmid) { 4360 char fname[24]; 4361 int retpid = -1; 4362 4363 snprintf(fname, sizeof(fname), "/proc/%d/status", vmid); 4364 FILE *fp = fopen(fname, "r"); 4365 4366 if (fp) { 4367 int pid, nspid; 4368 int ret; 4369 while (!feof(fp) && !ferror(fp)) { 4370 ret = fscanf(fp, "NSpid: %d %d", &pid, &nspid); 4371 if (ret == 1) { 4372 break; 4373 } 4374 if (ret == 2) { 4375 retpid = nspid; 4376 break; 4377 } 4378 for (;;) { 4379 int ch = fgetc(fp); 4380 if (ch == EOF || ch == (int)'\n') break; 4381 } 4382 } 4383 fclose(fp); 4384 } 4385 return retpid; 4386 } 4387 4388 extern void report_error(char* file_name, int line_no, char* title, 4389 char* format, ...); 4390 4391 // Some Linux distributions (notably: Alpine Linux) include the 4392 // grsecurity patches in the kernel. Of particular interest from a JVM perspective 4393 // is PaX (https://pax.grsecurity.net/), which adds some security features 4394 // related to page attributes. Specifically, the MPROTECT PaX functionality 4395 // (https://pax.grsecurity.net/docs/mprotect.txt) prevents dynamic 4396 // code generation by disallowing a (previously) writable page to be 4397 // marked as executable. This is, of course, exactly what HotSpot does 4398 // for JIT compiled methods, as well as for stubs, adapters, etc. 4399 // 4400 // Instead of crashing "lazily" when trying to make a page executable, 4401 // this code probes for the presence of PaX and reports the failure 4402 // eagerly.
4403 static void check_pax(void) { 4404 // Zero doesn't generate code dynamically, so no need to perform the PaX check 4405 #ifndef ZERO 4406 size_t size = os::Linux::page_size(); 4407 4408 void* p = ::mmap(NULL, size, PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); 4409 if (p == MAP_FAILED) { 4410 log_debug(os)("os_linux.cpp: check_pax: mmap failed (%s)" , os::strerror(errno)); 4411 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "failed to allocate memory for PaX check."); 4412 } 4413 4414 int res = ::mprotect(p, size, PROT_WRITE|PROT_EXEC); 4415 if (res == -1) { 4416 log_debug(os)("os_linux.cpp: check_pax: mprotect failed (%s)" , os::strerror(errno)); 4417 vm_exit_during_initialization( 4418 "Failed to mark memory page as executable - check if grsecurity/PaX is enabled"); 4419 } 4420 4421 ::munmap(p, size); 4422 #endif 4423 } 4424 4425 // this is called _before_ most of the global arguments have been parsed 4426 void os::init(void) { 4427 char dummy; // used to get a guess on the initial stack address 4428 4429 clock_tics_per_sec = sysconf(_SC_CLK_TCK); 4430 4431 Linux::set_page_size(sysconf(_SC_PAGESIZE)); 4432 if (Linux::page_size() == -1) { 4433 fatal("os_linux.cpp: os::init: sysconf failed (%s)", 4434 os::strerror(errno)); 4435 } 4436 _page_sizes.add(Linux::page_size()); 4437 4438 Linux::initialize_system_info(); 4439 4440 #ifdef __GLIBC__ 4441 Linux::_mallinfo = CAST_TO_FN_PTR(Linux::mallinfo_func_t, dlsym(RTLD_DEFAULT, "mallinfo")); 4442 Linux::_mallinfo2 = CAST_TO_FN_PTR(Linux::mallinfo2_func_t, dlsym(RTLD_DEFAULT, "mallinfo2")); 4443 #endif // __GLIBC__ 4444 4445 os::Linux::CPUPerfTicks pticks; 4446 bool res = os::Linux::get_tick_information(&pticks, -1); 4447 4448 if (res && pticks.has_steal_ticks) { 4449 has_initial_tick_info = true; 4450 initial_total_ticks = pticks.total; 4451 initial_steal_ticks = pticks.steal; 4452 } 4453 4454 // _main_thread points to the thread that created/loaded the JVM. 4455 Linux::_main_thread = pthread_self(); 4456 4457 // retrieve the entry point for pthread_setname_np 4458 Linux::_pthread_setname_np = 4459 (int(*)(pthread_t, const char*))dlsym(RTLD_DEFAULT, "pthread_setname_np"); 4460 4461 check_pax(); 4462 4463 os::Posix::init(); 4464 4465 initial_time_count = javaTimeNanos(); 4466 } 4467 4468 // To install functions for the atexit system call 4469 extern "C" { 4470 static void perfMemory_exit_helper() { 4471 perfMemory_exit(); 4472 } 4473 } 4474 4475 void os::pd_init_container_support() { 4476 OSContainer::init(); 4477 } 4478 4479 void os::Linux::numa_init() { 4480 4481 // Java can be invoked as follows: 4482 // 1. Without numactl: the heap will be allocated/configured on all nodes as 4483 // per the system policy. 4484 // 2. With numactl --interleave: 4485 // Use the numa_get_interleave_mask(v2) API to get the node bitmask. The same 4486 // API returns an empty (reset) bitmask in the membind case. 4487 // Interleave is only a hint, and the kernel can fall back to other nodes if 4488 // no memory is available on the target nodes. 4489 // 3. With numactl --membind: 4490 // Use the numa_get_membind(v2) API to get the node bitmask. The same API 4491 // returns a bitmask of all nodes in the interleave case. 4492 // numa_all_nodes_ptr holds a bitmask of all nodes. 4493 // numa_get_interleave_mask(v2) and numa_get_membind(v2) return the correct 4494 // bitmask when externally configured to run on all or fewer nodes. 4495 4496 if (!Linux::libnuma_init()) { 4497 FLAG_SET_ERGO(UseNUMA, false); 4498 FLAG_SET_ERGO(UseNUMAInterleaving, false); // Also depends on libnuma.
} else { 4500 if ((Linux::numa_max_node() < 1) || Linux::is_bound_to_single_node()) { 4501 // If there's only one node (they start from 0) or if the process 4502 // is bound explicitly to a single node using membind, disable NUMA. 4503 UseNUMA = false; 4504 } else { 4505 LogTarget(Info,os) log; 4506 LogStream ls(log); 4507 4508 Linux::set_configured_numa_policy(Linux::identify_numa_policy()); 4509 4510 struct bitmask* bmp = Linux::_numa_membind_bitmask; 4511 const char* numa_mode = "membind"; 4512 4513 if (Linux::is_running_in_interleave_mode()) { 4514 bmp = Linux::_numa_interleave_bitmask; 4515 numa_mode = "interleave"; 4516 } 4517 4518 ls.print("UseNUMA is enabled and invoked in '%s' mode." 4519 " Heap will be configured using NUMA memory nodes:", numa_mode); 4520 4521 for (int node = 0; node <= Linux::numa_max_node(); node++) { 4522 if (Linux::_numa_bitmask_isbitset(bmp, node)) { 4523 ls.print(" %d", node); 4524 } 4525 } 4526 } 4527 } 4528 4529 // When NUMA is requested, not-NUMA-aware allocations default to interleaving. 4530 if (UseNUMA && !UseNUMAInterleaving) { 4531 FLAG_SET_ERGO_IF_DEFAULT(UseNUMAInterleaving, true); 4532 } 4533 4534 if (UseParallelGC && UseNUMA && UseLargePages && !can_commit_large_page_memory()) { 4535 // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way 4536 // we can make the adaptive lgrp chunk resizing work. If the user specified both 4537 // UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn 4538 // and disable adaptive resizing. 4539 if (UseAdaptiveSizePolicy || UseAdaptiveNUMAChunkSizing) { 4540 warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, " 4541 "disabling adaptive resizing (-XX:-UseAdaptiveSizePolicy -XX:-UseAdaptiveNUMAChunkSizing)"); 4542 UseAdaptiveSizePolicy = false; 4543 UseAdaptiveNUMAChunkSizing = false; 4544 } 4545 } 4546 } 4547 4548 // this is called _after_ the global arguments have been parsed 4549 jint os::init_2(void) { 4550 4551 // This could be set after os::Posix::init() but all platforms 4552 // have to set it the same so we have to mirror Solaris. 4553 DEBUG_ONLY(os::set_mutex_init_done();) 4554 4555 os::Posix::init_2(); 4556 4557 Linux::fast_thread_clock_init(); 4558 4559 if (PosixSignals::init() == JNI_ERR) { 4560 return JNI_ERR; 4561 } 4562 4563 if (AdjustStackSizeForTLS) { 4564 get_minstack_init(); 4565 } 4566 4567 // Check and set minimum stack sizes against command line options 4568 if (Posix::set_minimum_stack_sizes() == JNI_ERR) { 4569 return JNI_ERR; 4570 } 4571 4572 #if defined(IA32) && !defined(ZERO) 4573 // Need to ensure we've determined the process's initial stack to 4574 // perform the workaround 4575 Linux::capture_initial_stack(JavaThread::stack_size_at_create()); 4576 workaround_expand_exec_shield_cs_limit(); 4577 #else 4578 suppress_primordial_thread_resolution = Arguments::created_by_java_launcher(); 4579 if (!suppress_primordial_thread_resolution) { 4580 Linux::capture_initial_stack(JavaThread::stack_size_at_create()); 4581 } 4582 #endif 4583 4584 Linux::libpthread_init(); 4585 Linux::sched_getcpu_init(); 4586 log_info(os)("HotSpot is running with %s, %s", 4587 Linux::libc_version(), Linux::libpthread_version()); 4588 4589 if (UseNUMA || UseNUMAInterleaving) { 4590 Linux::numa_init(); 4591 } 4592 4593 if (MaxFDLimit) { 4594 // Set the number of file descriptors to the max. Print out an error 4595 // if getrlimit/setrlimit fails, but continue regardless.
4596 struct rlimit nbr_files; 4597 int status = getrlimit(RLIMIT_NOFILE, &nbr_files); 4598 if (status != 0) { 4599 log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno)); 4600 } else { 4601 nbr_files.rlim_cur = nbr_files.rlim_max; 4602 status = setrlimit(RLIMIT_NOFILE, &nbr_files); 4603 if (status != 0) { 4604 log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno)); 4605 } 4606 } 4607 } 4608 4609 // at-exit methods are called in the reverse order of their registration. 4610 // atexit functions are called on return from main or as a result of a 4611 // call to exit(3). There can be only 32 of these functions registered 4612 // and atexit() does not set errno. 4613 4614 if (PerfAllowAtExitRegistration) { 4615 // only register atexit functions if PerfAllowAtExitRegistration is set. 4616 // atexit functions can be delayed until process exit time, which 4617 // can be problematic for embedded VM situations. Embedded VMs should 4618 // call DestroyJavaVM() to assure that VM resources are released. 4619 4620 // note: the perfMemory_exit_helper atexit function may be removed in 4621 // the future if the appropriate cleanup code can be added to the 4622 // VM_Exit VMOperation's doit method. 4623 if (atexit(perfMemory_exit_helper) != 0) { 4624 warning("os::init_2 atexit(perfMemory_exit_helper) failed"); 4625 } 4626 } 4627 4628 // initialize the thread priority policy 4629 prio_init(); 4630 4631 if (!FLAG_IS_DEFAULT(AllocateHeapAt)) { 4632 set_coredump_filter(DAX_SHARED_BIT); 4633 } 4634 4635 if (DumpPrivateMappingsInCore) { 4636 set_coredump_filter(FILE_BACKED_PVT_BIT); 4637 } 4638 4639 if (DumpSharedMappingsInCore) { 4640 set_coredump_filter(FILE_BACKED_SHARED_BIT); 4641 } 4642 4643 if (DumpPerfMapAtExit && FLAG_IS_DEFAULT(UseCodeCacheFlushing)) { 4644 // Disable code cache flushing to ensure the map file written at 4645 // exit contains all nmethods generated during execution. 4646 FLAG_SET_DEFAULT(UseCodeCacheFlushing, false); 4647 } 4648 4649 return JNI_OK; 4650 } 4651 4652 // older glibc versions don't have this macro (which expands to 4653 // an optimized bit-counting function) so we have to roll our own 4654 #ifndef CPU_COUNT 4655 4656 static int _cpu_count(const cpu_set_t* cpus) { 4657 int count = 0; 4658 // only look up to the number of configured processors 4659 for (int i = 0; i < os::processor_count(); i++) { 4660 if (CPU_ISSET(i, cpus)) { 4661 count++; 4662 } 4663 } 4664 return count; 4665 } 4666 4667 #define CPU_COUNT(cpus) _cpu_count(cpus) 4668 4669 #endif // CPU_COUNT 4670 4671 // Get the current number of available processors for this process. 4672 // This value can change at any time during a process's lifetime. 4673 // sched_getaffinity gives an accurate answer as it accounts for cpusets. 4674 // If it appears there may be more than 1024 processors then we do a 4675 // dynamic check - see 6515172 for details. 4676 // If anything goes wrong we fall back to returning the number of online 4677 // processors - which can be greater than the number available to the process. 4678 static int get_active_processor_count() { 4679 // Note: keep this function, with its CPU_xx macros, *outside* the os namespace (see JDK-8289477).
  cpu_set_t cpus;  // can represent at most 1024 (CPU_SETSIZE) processors
  cpu_set_t* cpus_p = &cpus;
  int cpus_size = sizeof(cpu_set_t);

  int configured_cpus = os::processor_count();  // upper bound on available cpus
  int cpu_count = 0;

// old build platforms may not support dynamic cpu sets
#ifdef CPU_ALLOC

  // To enable easy testing of the dynamic path on different platforms we
  // introduce a diagnostic flag: UseCpuAllocPath
  if (configured_cpus >= CPU_SETSIZE || UseCpuAllocPath) {
    // kernel may use a mask bigger than cpu_set_t
    log_trace(os)("active_processor_count: using dynamic path %s"
                  "- configured processors: %d",
                  UseCpuAllocPath ? "(forced) " : "",
                  configured_cpus);
    cpus_p = CPU_ALLOC(configured_cpus);
    if (cpus_p != NULL) {
      cpus_size = CPU_ALLOC_SIZE(configured_cpus);
      // zero it just to be safe
      CPU_ZERO_S(cpus_size, cpus_p);
    } else {
      // failed to allocate, so fall back to online cpus
      int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
      log_trace(os)("active_processor_count: "
                    "CPU_ALLOC failed (%s) - using "
                    "online processor count: %d",
                    os::strerror(errno), online_cpus);
      return online_cpus;
    }
  } else {
    log_trace(os)("active_processor_count: using static path - configured processors: %d",
                  configured_cpus);
  }
#else // CPU_ALLOC
// these stubs won't be executed
#define CPU_COUNT_S(size, cpus) -1
#define CPU_FREE(cpus)

  log_trace(os)("active_processor_count: only static path available - configured processors: %d",
                configured_cpus);
#endif // CPU_ALLOC

  // pid 0 means the current thread - which we have to assume represents the process
  if (sched_getaffinity(0, cpus_size, cpus_p) == 0) {
    if (cpus_p != &cpus) {  // can only be true when CPU_ALLOC used
      cpu_count = CPU_COUNT_S(cpus_size, cpus_p);
    } else {
      cpu_count = CPU_COUNT(cpus_p);
    }
    log_trace(os)("active_processor_count: sched_getaffinity processor count: %d", cpu_count);
  } else {
    cpu_count = ::sysconf(_SC_NPROCESSORS_ONLN);
    warning("sched_getaffinity failed (%s) - using online processor count (%d) "
            "which may exceed available processors", os::strerror(errno), cpu_count);
  }

  if (cpus_p != &cpus) {  // can only be true when CPU_ALLOC used
    CPU_FREE(cpus_p);
  }

  assert(cpu_count > 0 && cpu_count <= os::processor_count(), "sanity check");
  return cpu_count;
}

int os::Linux::active_processor_count() {
  return get_active_processor_count();
}

// Determine the active processor count from one of
// three different sources:
//
// 1. User option -XX:ActiveProcessorCount
// 2. kernel os calls (sched_getaffinity or sysconf(_SC_NPROCESSORS_ONLN))
// 3. extracted from the cgroup cpu subsystem (shares and quotas)
//
// Option 1, if specified, will always override.
// If the cgroup subsystem is active and configured, we
// will return the min of the cgroup and option 2 results.
// This is required since tools, such as numactl, that
// alter cpu affinity do not update the cgroup subsystem's
// cpuset configuration files.
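// (Illustrative example: launching with "java -XX:ActiveProcessorCount=2 ..."
// makes the function below return 2 unconditionally, bypassing both the
// container probe and the affinity-based count.)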
int os::active_processor_count() {
  // User has overridden the number of active processors
  if (ActiveProcessorCount > 0) {
    log_trace(os)("active_processor_count: "
                  "active processor count set by user : %d",
                  ActiveProcessorCount);
    return ActiveProcessorCount;
  }

  int active_cpus;
  if (OSContainer::is_containerized()) {
    active_cpus = OSContainer::active_processor_count();
    log_trace(os)("active_processor_count: determined by OSContainer: %d",
                  active_cpus);
  } else {
    active_cpus = os::Linux::active_processor_count();
  }

  return active_cpus;
}

static bool should_warn_invalid_processor_id() {
  if (os::processor_count() == 1) {
    // Don't warn if we only have one processor
    return false;
  }

  static volatile int warn_once = 1;

  if (Atomic::load(&warn_once) == 0 ||
      Atomic::xchg(&warn_once, 0) == 0) {
    // Don't warn more than once
    return false;
  }

  return true;
}

uint os::processor_id() {
  const int id = Linux::sched_getcpu();

  if (id < processor_count()) {
    return (uint)id;
  }

  // Some environments (e.g. openvz containers and the rr debugger) incorrectly
  // report a processor id that is higher than the number of processors available.
  // This is problematic, for example, when implementing CPU-local data structures,
  // where the processor id is used to index into an array of length processor_count().
  // If this happens we return 0 here. This is safe since we always have at least
  // one processor, but it's not optimal for performance if we're actually executing
  // in an environment with more than one processor.
  if (should_warn_invalid_processor_id()) {
    log_warning(os)("Invalid processor id reported by the operating system "
                    "(got processor id %d, valid processor id range is 0-%d)",
                    id, processor_count() - 1);
    log_warning(os)("Falling back to assuming processor id is 0. "
                    "This could have a negative impact on performance.");
  }

  return 0;
}

void os::set_native_thread_name(const char *name) {
  if (Linux::_pthread_setname_np) {
    char buf[16];  // according to the glibc manpage, 16 chars incl. '\0'
    snprintf(buf, sizeof(buf), "%s", name);
    buf[sizeof(buf) - 1] = '\0';
    const int rc = Linux::_pthread_setname_np(pthread_self(), buf);
    // ERANGE should not happen; all other errors should just be ignored.
    assert(rc != ERANGE, "pthread_setname_np failed");
  }
}

bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
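  // (A sketch of what an implementation could look like using the GNU
  // pthread_setaffinity_np extension - illustrative only, not committed code:
  //
  //   cpu_set_t one;
  //   CPU_ZERO(&one);
  //   CPU_SET(processor_id, &one);
  //   return pthread_setaffinity_np(pthread_self(), sizeof(one), &one) == 0;
  // )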
  return false;
}

////////////////////////////////////////////////////////////////////////////////
// debug support

bool os::find(address addr, outputStream* st) {
  Dl_info dlinfo;
  memset(&dlinfo, 0, sizeof(dlinfo));
  if (dladdr(addr, &dlinfo) != 0) {
    st->print(PTR_FORMAT ": ", p2i(addr));
    if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
      st->print("%s+" PTR_FORMAT, dlinfo.dli_sname,
                p2i(addr) - p2i(dlinfo.dli_saddr));
    } else if (dlinfo.dli_fbase != NULL) {
      st->print("<offset " PTR_FORMAT ">", p2i(addr) - p2i(dlinfo.dli_fbase));
    } else {
      st->print("<absolute address>");
    }
    if (dlinfo.dli_fname != NULL) {
      st->print(" in %s", dlinfo.dli_fname);
    }
    if (dlinfo.dli_fbase != NULL) {
      st->print(" at " PTR_FORMAT, p2i(dlinfo.dli_fbase));
    }
    st->cr();

    if (Verbose) {
      // decode some bytes around the PC
      address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
      address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
      address lowest = (address) dlinfo.dli_sname;
      if (!lowest)  lowest = (address) dlinfo.dli_fbase;
      if (begin < lowest)  begin = lowest;
      Dl_info dlinfo2;
      if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) {
        end = (address) dlinfo2.dli_saddr;
      }
      Disassembler::decode(begin, end, st);
    }
    return true;
  }
  return false;
}

////////////////////////////////////////////////////////////////////////////////
// misc

// This does not do anything on Linux. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters)
// on, e.g., Win32.
void
os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
                         JavaCallArguments* args, JavaThread* thread) {
  f(value, method, args, thread);
}

// This code originates from JDK's sysOpen and open64_w
// from src/solaris/hpi/src/system_md.c

int os::open(const char *path, int oflag, int mode) {
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }

  // All file descriptors that are opened in the Java process and not
  // specifically destined for a subprocess should have the close-on-exec
  // flag set. If we don't set it, then careless 3rd party native code
  // might fork and exec without closing all appropriate file descriptors
  // (e.g. as we do in closeDescriptors in UNIXProcess.c), and this in
  // turn might:
  //
  // - cause end-of-file to fail to be detected on some file
  //   descriptors, resulting in mysterious hangs, or
  //
  // - might cause an fopen in the subprocess to fail on a system
  //   suffering from bug 1085341.
  //
  // (Yes, the default setting of the close-on-exec flag is a Unix
  // design flaw.)
  //
  // See:
  // 1085341: 32-bit stdio routines should support file descriptors >255
  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
  //
  // Modern Linux kernels (after 2.6.23, released in 2007) support O_CLOEXEC
  // with open(). O_CLOEXEC is preferable to using FD_CLOEXEC on an open file
  // descriptor because it saves a system call and removes a small window
  // where the flag is unset. On ancient Linux kernels the O_CLOEXEC flag
  // will be ignored and we fall back to using FD_CLOEXEC (see below).
#ifdef O_CLOEXEC
  oflag |= O_CLOEXEC;
#endif
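  // (Illustrative failure mode for the comment above: if native code does
  // fork() + execve() without closing inherited descriptors, the write end
  // of a pipe owned by the VM stays open in the child, so a reader of that
  // pipe never sees EOF and blocks indefinitely.)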
  int fd = ::open64(path, oflag, mode);
  if (fd == -1) return -1;

  // If the open succeeded, the file might still be a directory.
  {
    struct stat64 buf64;
    int ret = ::fstat64(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      ::close(fd);
      return -1;
    }
  }

#ifdef FD_CLOEXEC
  // Validate that the use of the O_CLOEXEC flag on open above worked.
  // With recent kernels, we will perform this check exactly once.
  static sig_atomic_t O_CLOEXEC_is_known_to_work = 0;
  if (!O_CLOEXEC_is_known_to_work) {
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1) {
      if ((flags & FD_CLOEXEC) != 0) {
        O_CLOEXEC_is_known_to_work = 1;
      } else {
        ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
      }
    }
  }
#endif

  return fd;
}

// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = O_WRONLY | O_CREAT;
  oflags |= rewrite_existing ? O_TRUNC : O_EXCL;
  return ::open64(path, oflags, S_IREAD | S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
}

// This code originates from JDK's sysAvailable
// from src/solaris/hpi/src/native_threads/src/sys_api_td.c

int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  int mode;
  struct stat64 buf64;

  if (::fstat64(fd, &buf64) >= 0) {
    mode = buf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      int n;
      if (::ioctl(fd, FIONREAD, &n) >= 0) {
        *bytes = n;
        return 1;
      }
    }
  }
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
    return 0;
  }
  *bytes = end - cur;
  return 1;
}

// Map a block of memory.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
  int prot;
  int flags = MAP_PRIVATE;

  if (read_only) {
    prot = PROT_READ;
  } else {
    prot = PROT_READ | PROT_WRITE;
  }

  if (allow_exec) {
    prot |= PROT_EXEC;
  }

  if (addr != NULL) {
    flags |= MAP_FIXED;
  }

  char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
                                     fd, file_offset);
  if (mapped_address == MAP_FAILED) {
    return NULL;
  }
  return mapped_address;
}
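// (Note on pd_map_memory above: a non-NULL addr adds MAP_FIXED, and mmap with
// MAP_FIXED silently replaces any existing mapping in [addr, addr + bytes) -
// callers are expected to pass only addresses they have already reserved.)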
// Remap a block of memory.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // same as map_memory() on this OS
  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                        allow_exec);
}

// Unmap a block of memory.
bool os::pd_unmap_memory(char* addr, size_t bytes) {
  return munmap(addr, bytes) == 0;
}

static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time);

static jlong fast_cpu_time(Thread *thread) {
  clockid_t clockid;
  int rc = os::Linux::pthread_getcpuclockid(thread->osthread()->pthread_id(),
                                            &clockid);
  if (rc == 0) {
    return os::Linux::fast_thread_cpu_time(clockid);
  } else {
    // It's possible to encounter a terminated native thread that failed
    // to detach itself from the VM - which should result in ESRCH.
    assert_status(rc == ESRCH, rc, "pthread_getcpuclockid failed");
    return -1;
  }
}

// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) return
// the fastest estimate available on the platform.

jlong os::current_thread_cpu_time() {
  if (os::Linux::supports_fast_thread_cpu_time()) {
    return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
  } else {
    // return user + sys since the cost is the same
    return slow_thread_cpu_time(Thread::current(), true /* user + sys */);
  }
}

jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns
  if (os::Linux::supports_fast_thread_cpu_time()) {
    return fast_cpu_time(thread);
  } else {
    return slow_thread_cpu_time(thread, true /* user + sys */);
  }
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) {
    return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
  } else {
    return slow_thread_cpu_time(Thread::current(), user_sys_cpu_time);
  }
}

jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) {
    return fast_cpu_time(thread);
  } else {
    return slow_thread_cpu_time(thread, user_sys_cpu_time);
  }
}

// Returns -1 on error.
static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  pid_t tid = thread->osthread()->thread_id();
  char *s;
  char stat[2048];
  int statlen;
  char proc_name[64];
  int count;
  long sys_time, user_time;
  char cdummy;
  int idummy;
  long ldummy;
  FILE *fp;

  snprintf(proc_name, 64, "/proc/self/task/%d/stat", tid);
  fp = fopen(proc_name, "r");
  if (fp == NULL) return -1;
  statlen = fread(stat, 1, 2047, fp);
  stat[statlen] = '\0';
  fclose(fp);

  // Skip pid and the command string. Note that we could be dealing with
  // weird command names, e.g. the user could decide to rename the java launcher
  // to "java 1.4.2 :)", and then the stat file would look like
  //                1234 (java 1.4.2 :)) R ... ...
  // We don't really need to know the command string, just find the last
  // occurrence of ")" and then start parsing from there. See bug 4726580.
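  // (Field map per proc(5), for reference: after the ')' come state, ppid,
  // pgrp, session, tty_nr, tpgid, flags, minflt, cminflt, majflt and cmajflt,
  // followed by utime (field 14) and stime (field 15) in clock ticks - the
  // two values the sscanf below extracts as user_time and sys_time.)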
  s = strrchr(stat, ')');
  if (s == NULL) return -1;

  // Skip blank chars
  do { s++; } while (s && isspace(*s));

  count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
                 &cdummy, &idummy, &idummy, &idummy, &idummy, &idummy,
                 &ldummy, &ldummy, &ldummy, &ldummy, &ldummy,
                 &user_time, &sys_time);
  if (count != 13) return -1;
  if (user_sys_cpu_time) {
    return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec);
  } else {
    return (jlong)user_time * (1000000000 / clock_tics_per_sec);
  }
}

void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}

void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}

bool os::is_thread_cpu_time_supported() {
  return true;
}

// System loadavg support. Returns -1 if load average cannot be obtained.
// Linux doesn't yet have an (official) notion of processor sets,
// so just return the system-wide load average.
int os::loadavg(double loadavg[], int nelem) {
  return ::getloadavg(loadavg, nelem);
}

void os::pause() {
  char filename[MAX_PATH];
  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
    jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
  } else {
    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
  }

  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
  if (fd != -1) {
    struct stat buf;
    ::close(fd);
    while (::stat(filename, &buf) == 0) {
      (void)::poll(NULL, 0, 100);
    }
  } else {
    jio_fprintf(stderr,
                "Could not open pause file '%s', continuing immediately.\n", filename);
  }
}

// Get the default path to the core file.
// Returns the length of the string.
int os::get_core_path(char* buffer, size_t bufferSize) {
  /*
   * Max length of /proc/sys/kernel/core_pattern is 128 characters.
   * See https://www.kernel.org/doc/Documentation/sysctl/kernel.txt
   */
  const int core_pattern_len = 129;
  char core_pattern[core_pattern_len] = {0};

  int core_pattern_file = ::open("/proc/sys/kernel/core_pattern", O_RDONLY);
  if (core_pattern_file == -1) {
    return -1;
  }

  ssize_t ret = ::read(core_pattern_file, core_pattern, core_pattern_len);
  ::close(core_pattern_file);
  if (ret <= 0 || ret >= core_pattern_len || core_pattern[0] == '\n') {
    return -1;
  }
  if (core_pattern[ret-1] == '\n') {
    core_pattern[ret-1] = '\0';
  } else {
    core_pattern[ret] = '\0';
  }

  // Replace the %p in the core pattern with the process id. NOTE: we do this
  // only if the pattern doesn't start with "|", and we support only one %p in
  // the pattern.
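  // (Illustrative core_pattern values and the paths computed below: an
  // absolute pattern "/var/cores/core.%p" yields "/var/cores/core.1234" for
  // pid 1234; a relative "core" yields "<cwd>/core" (plus a ".1234" suffix
  // when core_uses_pid is 1); a leading '|' means the kernel pipes the dump
  // to a helper process instead of writing a file.)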
  char *pid_pos = strstr(core_pattern, "%p");
  const char* tail = (pid_pos != NULL) ? (pid_pos + 2) : "";  // skip over the "%p"
  int written;

  if (core_pattern[0] == '/') {
    if (pid_pos != NULL) {
      *pid_pos = '\0';
      written = jio_snprintf(buffer, bufferSize, "%s%d%s", core_pattern,
                             current_process_id(), tail);
    } else {
      written = jio_snprintf(buffer, bufferSize, "%s", core_pattern);
    }
  } else {
    char cwd[PATH_MAX];

    const char* p = get_current_directory(cwd, PATH_MAX);
    if (p == NULL) {
      return -1;
    }

    if (core_pattern[0] == '|') {
      written = jio_snprintf(buffer, bufferSize,
                             "\"%s\" (or dumping to %s/core.%d)",
                             &core_pattern[1], p, current_process_id());
    } else if (pid_pos != NULL) {
      *pid_pos = '\0';
      written = jio_snprintf(buffer, bufferSize, "%s/%s%d%s", p, core_pattern,
                             current_process_id(), tail);
    } else {
      written = jio_snprintf(buffer, bufferSize, "%s/%s", p, core_pattern);
    }
  }

  if (written < 0) {
    return -1;
  }

  if (((size_t)written < bufferSize) && (pid_pos == NULL) && (core_pattern[0] != '|')) {
    int core_uses_pid_file = ::open("/proc/sys/kernel/core_uses_pid", O_RDONLY);

    if (core_uses_pid_file != -1) {
      char core_uses_pid = 0;
      ssize_t ret = ::read(core_uses_pid_file, &core_uses_pid, 1);
      ::close(core_uses_pid_file);

      if (core_uses_pid == '1') {
        jio_snprintf(buffer + written, bufferSize - written,
                     ".%d", current_process_id());
      }
    }
  }

  return strlen(buffer);
}

bool os::start_debugging(char *buf, int buflen) {
  int len = (int)strlen(buf);
  char *p = &buf[len];

  jio_snprintf(p, buflen-len,
               "\n\n"
               "Do you want to debug the problem?\n\n"
               "To debug, run 'gdb /proc/%d/exe %d'; then switch to thread " UINTX_FORMAT " (" INTPTR_FORMAT ")\n"
               "Enter 'yes' to launch gdb automatically (PATH must include gdb)\n"
               "Otherwise, press RETURN to abort...",
               os::current_process_id(), os::current_process_id(),
               os::current_thread_id(), os::current_thread_id());

  bool yes = os::message_box("Unexpected Error", buf);

  if (yes) {
    // yes, user asked VM to launch debugger
    jio_snprintf(buf, sizeof(char)*buflen, "gdb /proc/%d/exe %d",
                 os::current_process_id(), os::current_process_id());

    os::fork_and_exec(buf);
    yes = false;
  }
  return yes;
}
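// (Illustrative: for pid 1234 the prompt above offers "gdb /proc/1234/exe 1234";
// /proc/<pid>/exe is a symlink to the running executable, so gdb debugs the
// live VM through its own binary image.)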

// Java/Compiler thread:
//
//   Low memory addresses
// P0 +------------------------+
//    |                        |\  Java thread created by VM does not have glibc
//    |    glibc guard page    | - guard page, attached Java thread usually has
//    |                        |/  1 glibc guard page.
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
//    |                        |\
//    |  HotSpot Guard Pages   | - red, yellow and reserved pages
//    |                        |/
//    +------------------------+ StackOverflow::stack_reserved_zone_base()
//    |                        |\
//    |      Normal Stack      | -
//    |                        |/
// P2 +------------------------+ Thread::stack_base()
//
// Non-Java thread:
//
//   Low memory addresses
// P0 +------------------------+
//    |                        |\
//    |    glibc guard page    | - usually 1 page
//    |                        |/
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
//    |                        |\
//    |      Normal Stack      | -
//    |                        |/
// P2 +------------------------+ Thread::stack_base()
//
// ** P1 (aka bottom) and size (P2 = P1 + size) are the address and stack size
//    returned from pthread_attr_getstack().
// ** Due to an NPTL implementation error, linux takes the glibc guard page out
//    of the stack size given in pthread_attr. We work around this for
//    threads created by the VM. (We adapt bottom to be P1 and size accordingly.)
//
#ifndef ZERO
static void current_stack_region(address * bottom, size_t * size) {
  if (os::is_primordial_thread()) {
    // primordial thread needs special handling because pthread_getattr_np()
    // may return a bogus value.
    *bottom = os::Linux::initial_thread_stack_bottom();
    *size   = os::Linux::initial_thread_stack_size();
  } else {
    pthread_attr_t attr;

    int rslt = pthread_getattr_np(pthread_self(), &attr);

    // JVM needs to know the exact stack location; abort if it fails
    if (rslt != 0) {
      if (rslt == ENOMEM) {
        vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
      } else {
        fatal("pthread_getattr_np failed with error = %d", rslt);
      }
    }

    if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) {
      fatal("Cannot locate current stack attributes!");
    }

    // Work around NPTL stack guard error.
    size_t guard_size = 0;
    rslt = pthread_attr_getguardsize(&attr, &guard_size);
    if (rslt != 0) {
      fatal("pthread_attr_getguardsize failed with error = %d", rslt);
    }
    *bottom += guard_size;
    *size   -= guard_size;

    pthread_attr_destroy(&attr);
  }
  assert(os::current_stack_pointer() >= *bottom &&
         os::current_stack_pointer() < *bottom + *size, "just checking");
}
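// (Worked example of the guard adjustment above, assuming a 1 MB attr stack
// whose bottom is B and a 4 KB guard page: pthread_attr_getstack() reports
// [B, B + 1 MB), and the workaround shrinks that to [B + 4 KB, B + 1 MB) -
// bottom moves up by guard_size and size shrinks by the same amount.)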

address os::current_stack_base() {
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return (bottom + size);
}

size_t os::current_stack_size() {
  // This stack size includes the usable stack and HotSpot guard pages
  // (for the threads that have HotSpot guard pages).
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return size;
}
#endif

static inline struct timespec get_mtime(const char* filename) {
  struct stat st;
  int ret = os::stat(filename, &st);
  assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
  return st.st_mtim;
}

int os::compare_file_modified_times(const char* file1, const char* file2) {
  struct timespec filetime1 = get_mtime(file1);
  struct timespec filetime2 = get_mtime(file2);
  int diff = filetime1.tv_sec - filetime2.tv_sec;
  if (diff == 0) {
    return filetime1.tv_nsec - filetime2.tv_nsec;
  }
  return diff;
}

bool os::supports_map_sync() {
  return true;
}

void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {
  // Note: all ranges are "[..)"
  unsigned long long start = (unsigned long long)addr;
  unsigned long long end = start + bytes;
  FILE* f = ::fopen("/proc/self/maps", "r");
  int num_found = 0;
  if (f != NULL) {
    st->print_cr("Range [%llx-%llx) contains: ", start, end);
    char line[512];
    while (fgets(line, sizeof(line), f) == line) {
      unsigned long long segment_start = 0;
      unsigned long long segment_end = 0;
      if (::sscanf(line, "%llx-%llx", &segment_start, &segment_end) == 2) {
        // Let's print out every range which touches ours.
        if (segment_start < end && segment_end > start) {
          num_found++;
          st->print("%s", line);  // line includes \n
        }
      }
    }
    ::fclose(f);
    if (num_found == 0) {
      st->print_cr("nothing.");
    }
    st->cr();
  }
}
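// (For reference, each /proc/self/maps line echoed above has the form
// "start-end perms offset dev inode [pathname]", e.g.
// "7f0e3c000000-7f0e3c021000 rw-p 00000000 00:00 0" - the leading
// "start-end" pair is what the sscanf in the loop parses.)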