1 /*
   2  * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "classfile/classLoader.hpp"
  26 #include "jvm.h"
  27 #include "jvmtifiles/jvmti.h"
  28 #include "logging/log.hpp"
  29 #include "memory/allocation.inline.hpp"
  30 #include "os_posix.inline.hpp"
  31 #include "runtime/globals_extension.hpp"
  32 #include "runtime/osThread.hpp"
  33 #include "runtime/frame.inline.hpp"
  34 #include "runtime/interfaceSupport.inline.hpp"
  35 #include "runtime/sharedRuntime.hpp"
  36 #include "services/attachListener.hpp"
  37 #include "services/memTracker.hpp"
  38 #include "runtime/arguments.hpp"
  39 #include "runtime/atomic.hpp"
  40 #include "runtime/java.hpp"
  41 #include "runtime/orderAccess.hpp"
  42 #include "runtime/park.hpp"
  43 #include "runtime/perfMemory.hpp"
  44 #include "utilities/align.hpp"
  45 #include "utilities/defaultStream.hpp"
  46 #include "utilities/events.hpp"
  47 #include "utilities/formatBuffer.hpp"
  48 #include "utilities/globalDefinitions.hpp"
  49 #include "utilities/macros.hpp"
  50 #include "utilities/vmError.hpp"
  51 #ifdef AIX
  52 #include "loadlib_aix.hpp"
  53 #include "os_aix.hpp"
  54 #endif
  55 #ifdef LINUX
  56 #include "os_linux.hpp"
  57 #endif
  58 
  59 #include <dirent.h>
  60 #include <dlfcn.h>
  61 #include <grp.h>
  62 #include <locale.h>
  63 #include <netdb.h>
  64 #include <pwd.h>
  65 #include <pthread.h>
  66 #include <signal.h>
  67 #include <sys/mman.h>
  68 #include <sys/resource.h>
  69 #include <sys/socket.h>
  70 #include <spawn.h>
  71 #include <sys/time.h>
  72 #include <sys/times.h>
  73 #include <sys/types.h>
  74 #include <sys/utsname.h>
  75 #include <sys/wait.h>
  76 #include <time.h>
  77 #include <unistd.h>
  78 #include <utmpx.h>
  79 
  80 #ifdef __APPLE__
  81   #include <crt_externs.h>
  82 #endif
  83 
  84 #define ROOT_UID 0
  85 
  86 #ifndef MAP_ANONYMOUS
  87   #define MAP_ANONYMOUS MAP_ANON
  88 #endif
  89 
  90 #define check_with_errno(check_type, cond, msg)                             \
  91   do {                                                                      \
  92     int err = errno;                                                        \
  93     check_type(cond, "%s; error='%s' (errno=%s)", msg, os::strerror(err),   \
  94                os::errno_name(err));                                        \
  95 } while (false)
  96 
  97 #define assert_with_errno(cond, msg)    check_with_errno(assert, cond, msg)
  98 #define guarantee_with_errno(cond, msg) check_with_errno(guarantee, cond, msg)
  99 
 100 static jlong initial_time_count = 0;
 101 
 102 static int clock_tics_per_sec = 100;
 103 
 104 // Platform minimum stack allowed
 105 size_t os::_os_min_stack_allowed = PTHREAD_STACK_MIN;
 106 
 107 // Check core dump limit and report possible place where core can be found
 108 void os::check_dump_limit(char* buffer, size_t bufferSize) {
 109   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
 110     jio_snprintf(buffer, bufferSize, "CreateCoredumpOnCrash is disabled from command line");
 111     VMError::record_coredump_status(buffer, false);
 112     return;
 113   }
 114 
 115   int n;
 116   struct rlimit rlim;
 117   bool success;
 118 
 119   char core_path[PATH_MAX];
 120   n = get_core_path(core_path, PATH_MAX);
 121 
 122   if (n <= 0) {
 123     jio_snprintf(buffer, bufferSize, "core.%d (may not exist)", current_process_id());
 124     success = true;
 125 #ifdef LINUX
 126   } else if (core_path[0] == '"') { // redirect to user process
 127     jio_snprintf(buffer, bufferSize, "Core dumps may be processed with %s", core_path);
 128     success = true;
 129 #endif
 130   } else if (getrlimit(RLIMIT_CORE, &rlim) != 0) {
 131     jio_snprintf(buffer, bufferSize, "%s (may not exist)", core_path);
 132     success = true;
 133   } else {
 134     switch(rlim.rlim_cur) {
 135       case RLIM_INFINITY:
 136         jio_snprintf(buffer, bufferSize, "%s", core_path);
 137         success = true;
 138         break;
 139       case 0:
 140         jio_snprintf(buffer, bufferSize, "Core dumps have been disabled. To enable core dumping, try \"ulimit -c unlimited\" before starting Java again");
 141         success = false;
 142         break;
 143       default:
 144         jio_snprintf(buffer, bufferSize, "%s (max size " UINT64_FORMAT " k). To ensure a full core dump, try \"ulimit -c unlimited\" before starting Java again", core_path, uint64_t(rlim.rlim_cur) / K);
 145         success = true;
 146         break;
 147     }
 148   }
 149 
 150   VMError::record_coredump_status(buffer, success);
 151 }
 152 
 153 int os::get_native_stack(address* stack, int frames, int toSkip) {
 154   int frame_idx = 0;
 155   int num_of_frames;  // number of frames captured
 156   frame fr = os::current_frame();
 157   while (fr.pc() && frame_idx < frames) {
 158     if (toSkip > 0) {
 159       toSkip --;
 160     } else {
 161       stack[frame_idx ++] = fr.pc();
 162     }
 163     if (fr.fp() == nullptr || fr.cb() != nullptr ||
 164         fr.sender_pc() == nullptr || os::is_first_C_frame(&fr)) break;
 165 
 166     if (fr.sender_pc() && !os::is_first_C_frame(&fr)) {
 167       fr = os::get_sender_for_C_frame(&fr);
 168     } else {
 169       break;
 170     }
 171   }
 172   num_of_frames = frame_idx;
 173   for (; frame_idx < frames; frame_idx ++) {
 174     stack[frame_idx] = nullptr;
 175   }
 176 
 177   return num_of_frames;
 178 }
 179 
 180 int os::get_last_error() {
 181   return errno;
 182 }
 183 
 184 size_t os::lasterror(char *buf, size_t len) {
 185   if (errno == 0)  return 0;
 186 
 187   const char *s = os::strerror(errno);
 188   size_t n = ::strlen(s);
 189   if (n >= len) {
 190     n = len - 1;
 191   }
 192   ::strncpy(buf, s, n);
 193   buf[n] = '\0';
 194   return n;
 195 }
 196 
 197 // Return true if user is running as root.
 198 bool os::have_special_privileges() {
 199   static bool privileges = (getuid() != geteuid()) || (getgid() != getegid());
 200   return privileges;
 201 }
 202 
 203 void os::wait_for_keypress_at_exit(void) {
 204   // don't do anything on posix platforms
 205   return;
 206 }
 207 
 208 int os::create_file_for_heap(const char* dir) {
 209   int fd;
 210 
 211 #if defined(LINUX) && defined(O_TMPFILE)
 212   char* native_dir = os::strdup(dir);
 213   if (native_dir == nullptr) {
 214     vm_exit_during_initialization(err_msg("strdup failed during creation of backing file for heap (%s)", os::strerror(errno)));
 215     return -1;
 216   }
 217   os::native_path(native_dir);
 218   fd = os::open(dir, O_TMPFILE | O_RDWR, S_IRUSR | S_IWUSR);
 219   os::free(native_dir);
 220 
 221   if (fd == -1)
 222 #endif
 223   {
 224     const char name_template[] = "/jvmheap.XXXXXX";
 225 
 226     size_t fullname_len = strlen(dir) + strlen(name_template);
 227     char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
 228     if (fullname == nullptr) {
 229       vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
 230       return -1;
 231     }
 232     int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
 233     assert((size_t)n == fullname_len, "Unexpected number of characters in string");
 234 
 235     os::native_path(fullname);
 236 
 237     // create a new file.
 238     fd = mkstemp(fullname);
 239 
 240     if (fd < 0) {
 241       warning("Could not create file for heap with template %s", fullname);
 242       os::free(fullname);
 243       return -1;
 244     } else {
 245       // delete the name from the filesystem. When 'fd' is closed, the file (and space) will be deleted.
 246       int ret = unlink(fullname);
 247       assert_with_errno(ret == 0, "unlink returned error");
 248     }
 249 
 250     os::free(fullname);
 251   }
 252 
 253   return fd;
 254 }
 255 
 256 // Is a (classpath) directory empty?
 257 bool os::dir_is_empty(const char* path) {
 258   DIR *dir = nullptr;
 259   struct dirent *ptr;
 260 
 261   dir = ::opendir(path);
 262   if (dir == nullptr) return true;
 263 
 264   // Scan the directory
 265   bool result = true;
 266   while (result && (ptr = ::readdir(dir)) != nullptr) {
 267     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
 268       result = false;
 269     }
 270   }
 271   ::closedir(dir);
 272   return result;
 273 }
 274 
 275 static char* reserve_mmapped_memory(size_t bytes, char* requested_addr) {
 276   char * addr;
 277   int flags = MAP_PRIVATE NOT_AIX( | MAP_NORESERVE ) | MAP_ANONYMOUS;
 278   if (requested_addr != nullptr) {
 279     assert((uintptr_t)requested_addr % os::vm_page_size() == 0, "Requested address should be aligned to OS page size");
 280     flags |= MAP_FIXED;
 281   }
 282 
 283   // Map reserved/uncommitted pages PROT_NONE so we fail early if we
 284   // touch an uncommitted page. Otherwise, the read/write might
 285   // succeed if we have enough swap space to back the physical page.
 286   addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
 287                        flags, -1, 0);
 288 
 289   if (addr != MAP_FAILED) {
 290     MemTracker::record_virtual_memory_reserve((address)addr, bytes, CALLER_PC);
 291     return addr;
 292   }
 293   return nullptr;
 294 }
 295 
 296 static int util_posix_fallocate(int fd, off_t offset, off_t len) {
 297   static_assert(sizeof(off_t) == 8, "Expected Large File Support in this file");
 298 #ifdef __APPLE__
 299   fstore_t store = { F_ALLOCATECONTIG, F_PEOFPOSMODE, 0, len };
 300   // First we try to get a continuous chunk of disk space
 301   int ret = fcntl(fd, F_PREALLOCATE, &store);
 302   if (ret == -1) {
 303     // Maybe we are too fragmented, try to allocate non-continuous range
 304     store.fst_flags = F_ALLOCATEALL;
 305     ret = fcntl(fd, F_PREALLOCATE, &store);
 306   }
 307   if(ret != -1) {
 308     return ftruncate(fd, len);
 309   }
 310   return -1;
 311 #else
 312   return posix_fallocate(fd, offset, len);
 313 #endif
 314 }
 315 
 316 // Map the given address range to the provided file descriptor.
 317 char* os::map_memory_to_file(char* base, size_t size, int fd) {
 318   assert(fd != -1, "File descriptor is not valid");
 319 
 320   // allocate space for the file
 321   int ret = util_posix_fallocate(fd, 0, (off_t)size);
 322   if (ret != 0) {
 323     vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory. error(%d)", ret));
 324     return nullptr;
 325   }
 326 
 327   int prot = PROT_READ | PROT_WRITE;
 328   int flags = MAP_SHARED;
 329   if (base != nullptr) {
 330     flags |= MAP_FIXED;
 331   }
 332   char* addr = (char*)mmap(base, size, prot, flags, fd, 0);
 333 
 334   if (addr == MAP_FAILED) {
 335     warning("Failed mmap to file. (%s)", os::strerror(errno));
 336     return nullptr;
 337   }
 338   if (base != nullptr && addr != base) {
 339     if (!os::release_memory(addr, size)) {
 340       warning("Could not release memory on unsuccessful file mapping");
 341     }
 342     return nullptr;
 343   }
 344   return addr;
 345 }
 346 
 347 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
 348   assert(fd != -1, "File descriptor is not valid");
 349   assert(base != nullptr, "Base cannot be null");
 350 
 351   return map_memory_to_file(base, size, fd);
 352 }
 353 
 354 static size_t calculate_aligned_extra_size(size_t size, size_t alignment) {
 355   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
 356       "Alignment must be a multiple of allocation granularity (page size)");
 357   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
 358 
 359   size_t extra_size = size + alignment;
 360   assert(extra_size >= size, "overflow, size is too large to allow alignment");
 361   return extra_size;
 362 }
 363 
 364 // After a bigger chunk was mapped, unmaps start and end parts to get the requested alignment.
 365 static char* chop_extra_memory(size_t size, size_t alignment, char* extra_base, size_t extra_size) {
 366   // Do manual alignment
 367   char* aligned_base = align_up(extra_base, alignment);
 368 
 369   // [  |                                       |  ]
 370   // ^ extra_base
 371   //    ^ extra_base + begin_offset == aligned_base
 372   //     extra_base + begin_offset + size       ^
 373   //                       extra_base + extra_size ^
 374   // |<>| == begin_offset
 375   //                              end_offset == |<>|
 376   size_t begin_offset = aligned_base - extra_base;
 377   size_t end_offset = (extra_base + extra_size) - (aligned_base + size);
 378 
 379   if (begin_offset > 0) {
 380       os::release_memory(extra_base, begin_offset);
 381   }
 382 
 383   if (end_offset > 0) {
 384       os::release_memory(extra_base + begin_offset + size, end_offset);
 385   }
 386 
 387   return aligned_base;
 388 }
 389 
 390 // Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
 391 // so on posix, unmap the section at the start and at the end of the chunk that we mapped
 392 // rather than unmapping and remapping the whole chunk to get requested alignment.
 393 char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) {
 394   size_t extra_size = calculate_aligned_extra_size(size, alignment);
 395   char* extra_base = os::reserve_memory(extra_size, exec);
 396   if (extra_base == nullptr) {
 397     return nullptr;
 398   }
 399   return chop_extra_memory(size, alignment, extra_base, extra_size);
 400 }
 401 
 402 char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int file_desc) {
 403   size_t extra_size = calculate_aligned_extra_size(size, alignment);
 404   // For file mapping, we do not call os:map_memory_to_file(size,fd) since:
 405   // - we later chop away parts of the mapping using os::release_memory and that could fail if the
 406   //   original mmap call had been tied to an fd.
 407   // - The memory API os::reserve_memory uses is an implementation detail. It may (and usually is)
 408   //   mmap but it also may System V shared memory which cannot be uncommitted as a whole, so
 409   //   chopping off and unmapping excess bits back and front (see below) would not work.
 410   char* extra_base = reserve_mmapped_memory(extra_size, nullptr);
 411   if (extra_base == nullptr) {
 412     return nullptr;
 413   }
 414   char* aligned_base = chop_extra_memory(size, alignment, extra_base, extra_size);
 415   // After we have an aligned address, we can replace anonymous mapping with file mapping
 416   if (replace_existing_mapping_with_file_mapping(aligned_base, size, file_desc) == nullptr) {
 417     vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
 418   }
 419   MemTracker::record_virtual_memory_commit((address)aligned_base, size, CALLER_PC);
 420   return aligned_base;
 421 }
 422 
 423 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
 424   // All supported POSIX platforms provide C99 semantics.
 425   ALLOW_C_FUNCTION(::vsnprintf, int result = ::vsnprintf(buf, len, fmt, args);)
 426   // If an encoding error occurred (result < 0) then it's not clear
 427   // whether the buffer is NUL terminated, so ensure it is.
 428   if ((result < 0) && (len > 0)) {
 429     buf[len - 1] = '\0';
 430   }
 431   return result;
 432 }
 433 
 434 int os::get_fileno(FILE* fp) {
 435   return NOT_AIX(::)fileno(fp);
 436 }
 437 
 438 struct tm* os::gmtime_pd(const time_t* clock, struct tm*  res) {
 439   return gmtime_r(clock, res);
 440 }
 441 
 442 void os::Posix::print_load_average(outputStream* st) {
 443   st->print("load average: ");
 444   double loadavg[3];
 445   int res = os::loadavg(loadavg, 3);
 446   if (res != -1) {
 447     st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
 448   } else {
 449     st->print(" Unavailable");
 450   }
 451   st->cr();
 452 }
 453 
 454 // boot/uptime information;
 455 // unfortunately it does not work on macOS and Linux because the utx chain has no entry
 456 // for reboot at least on my test machines
 457 void os::Posix::print_uptime_info(outputStream* st) {
 458   int bootsec = -1;
 459   int currsec = time(nullptr);
 460   struct utmpx* ent;
 461   setutxent();
 462   while ((ent = getutxent())) {
 463     if (!strcmp("system boot", ent->ut_line)) {
 464       bootsec = ent->ut_tv.tv_sec;
 465       break;
 466     }
 467   }
 468 
 469   if (bootsec != -1) {
 470     os::print_dhm(st, "OS uptime:", (long) (currsec-bootsec));
 471   }
 472 }
 473 
 474 static void print_rlimit(outputStream* st, const char* msg,
 475                          int resource, bool output_k = false) {
 476   struct rlimit rlim;
 477 
 478   st->print(" %s ", msg);
 479   int res = getrlimit(resource, &rlim);
 480   if (res == -1) {
 481     st->print("could not obtain value");
 482   } else {
 483     // soft limit
 484     if (rlim.rlim_cur == RLIM_INFINITY) { st->print("infinity"); }
 485     else {
 486       if (output_k) { st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) / K); }
 487       else { st->print(UINT64_FORMAT, uint64_t(rlim.rlim_cur)); }
 488     }
 489     // hard limit
 490     st->print("/");
 491     if (rlim.rlim_max == RLIM_INFINITY) { st->print("infinity"); }
 492     else {
 493       if (output_k) { st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_max) / K); }
 494       else { st->print(UINT64_FORMAT, uint64_t(rlim.rlim_max)); }
 495     }
 496   }
 497 }
 498 
 499 void os::Posix::print_rlimit_info(outputStream* st) {
 500   st->print("rlimit (soft/hard):");
 501   print_rlimit(st, "STACK", RLIMIT_STACK, true);
 502   print_rlimit(st, ", CORE", RLIMIT_CORE, true);
 503 
 504 #if defined(AIX)
 505   st->print(", NPROC ");
 506   st->print("%d", sysconf(_SC_CHILD_MAX));
 507 
 508   print_rlimit(st, ", THREADS", RLIMIT_THREADS);
 509 #else
 510   print_rlimit(st, ", NPROC", RLIMIT_NPROC);
 511 #endif
 512 
 513   print_rlimit(st, ", NOFILE", RLIMIT_NOFILE);
 514   print_rlimit(st, ", AS", RLIMIT_AS, true);
 515   print_rlimit(st, ", CPU", RLIMIT_CPU);
 516   print_rlimit(st, ", DATA", RLIMIT_DATA, true);
 517 
 518   // maximum size of files that the process may create
 519   print_rlimit(st, ", FSIZE", RLIMIT_FSIZE, true);
 520 
 521 #if defined(LINUX) || defined(__APPLE__)
 522   // maximum number of bytes of memory that may be locked into RAM
 523   // (rounded down to the nearest  multiple of system pagesize)
 524   print_rlimit(st, ", MEMLOCK", RLIMIT_MEMLOCK, true);
 525 #endif
 526 
 527   // MacOS; The maximum size (in bytes) to which a process's resident set size may grow.
 528 #if defined(__APPLE__)
 529   print_rlimit(st, ", RSS", RLIMIT_RSS, true);
 530 #endif
 531 
 532   st->cr();
 533 }
 534 
 535 void os::Posix::print_uname_info(outputStream* st) {
 536   // kernel
 537   st->print("uname: ");
 538   struct utsname name;
 539   uname(&name);
 540   st->print("%s ", name.sysname);
 541 #ifdef ASSERT
 542   st->print("%s ", name.nodename);
 543 #endif
 544   st->print("%s ", name.release);
 545   st->print("%s ", name.version);
 546   st->print("%s", name.machine);
 547   st->cr();
 548 }
 549 
 550 void os::Posix::print_umask(outputStream* st, mode_t umsk) {
 551   st->print((umsk & S_IRUSR) ? "r" : "-");
 552   st->print((umsk & S_IWUSR) ? "w" : "-");
 553   st->print((umsk & S_IXUSR) ? "x" : "-");
 554   st->print((umsk & S_IRGRP) ? "r" : "-");
 555   st->print((umsk & S_IWGRP) ? "w" : "-");
 556   st->print((umsk & S_IXGRP) ? "x" : "-");
 557   st->print((umsk & S_IROTH) ? "r" : "-");
 558   st->print((umsk & S_IWOTH) ? "w" : "-");
 559   st->print((umsk & S_IXOTH) ? "x" : "-");
 560 }
 561 
 562 void os::print_user_info(outputStream* st) {
 563   unsigned id = (unsigned) ::getuid();
 564   st->print("uid  : %u ", id);
 565   id = (unsigned) ::geteuid();
 566   st->print("euid : %u ", id);
 567   id = (unsigned) ::getgid();
 568   st->print("gid  : %u ", id);
 569   id = (unsigned) ::getegid();
 570   st->print_cr("egid : %u", id);
 571   st->cr();
 572 
 573   mode_t umsk = ::umask(0);
 574   ::umask(umsk);
 575   st->print("umask: %04o (", (unsigned) umsk);
 576   os::Posix::print_umask(st, umsk);
 577   st->print_cr(")");
 578   st->cr();
 579 }
 580 
 581 // Print all active locale categories, one line each
 582 void os::print_active_locale(outputStream* st) {
 583   st->print_cr("Active Locale:");
 584   // Posix is quiet about how exactly LC_ALL is implemented.
 585   // Just print it out too, in case LC_ALL is held separately
 586   // from the individual categories.
 587   #define LOCALE_CAT_DO(f) \
 588     f(LC_ALL) \
 589     f(LC_COLLATE) \
 590     f(LC_CTYPE) \
 591     f(LC_MESSAGES) \
 592     f(LC_MONETARY) \
 593     f(LC_NUMERIC) \
 594     f(LC_TIME)
 595   #define XX(cat) { cat, #cat },
 596   const struct { int c; const char* name; } categories[] = {
 597       LOCALE_CAT_DO(XX)
 598       { -1, nullptr }
 599   };
 600   #undef XX
 601   #undef LOCALE_CAT_DO
 602   for (int i = 0; categories[i].c != -1; i ++) {
 603     const char* locale = setlocale(categories[i].c, nullptr);
 604     st->print_cr("%s=%s", categories[i].name,
 605                  ((locale != nullptr) ? locale : "<unknown>"));
 606   }
 607 }
 608 
 609 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
 610   // no prefix required
 611 }
 612 
 613 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
 614   // no suffix required
 615 }
 616 
 617 bool os::get_host_name(char* buf, size_t buflen) {
 618   struct utsname name;
 619   uname(&name);
 620   jio_snprintf(buf, buflen, "%s", name.nodename);
 621   return true;
 622 }
 623 
 624 #ifndef _LP64
 625 // Helper, on 32bit, for os::has_allocatable_memory_limit
 626 static bool is_allocatable(size_t s) {
 627   if (s < 2 * G) {
 628     return true;
 629   }
 630   // Use raw anonymous mmap here; no need to go through any
 631   // of our reservation layers. We will unmap right away.
 632   void* p = ::mmap(nullptr, s, PROT_NONE,
 633                    MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS, -1, 0);
 634   if (p == MAP_FAILED) {
 635     return false;
 636   } else {
 637     ::munmap(p, s);
 638     return true;
 639   }
 640 }
 641 #endif // !_LP64
 642 
 643 
 644 bool os::has_allocatable_memory_limit(size_t* limit) {
 645   struct rlimit rlim;
 646   int getrlimit_res = getrlimit(RLIMIT_AS, &rlim);
 647   // if there was an error when calling getrlimit, assume that there is no limitation
 648   // on virtual memory.
 649   bool result;
 650   if ((getrlimit_res != 0) || (rlim.rlim_cur == RLIM_INFINITY)) {
 651     result = false;
 652   } else {
 653     *limit = (size_t)rlim.rlim_cur;
 654     result = true;
 655   }
 656 #ifdef _LP64
 657   return result;
 658 #else
 659   // arbitrary virtual space limit for 32 bit Unices found by testing. If
 660   // getrlimit above returned a limit, bound it with this limit. Otherwise
 661   // directly use it.
 662   const size_t max_virtual_limit = 3800*M;
 663   if (result) {
 664     *limit = MIN2(*limit, max_virtual_limit);
 665   } else {
 666     *limit = max_virtual_limit;
 667   }
 668 
 669   // bound by actually allocatable memory. The algorithm uses two bounds, an
 670   // upper and a lower limit. The upper limit is the current highest amount of
 671   // memory that could not be allocated, the lower limit is the current highest
 672   // amount of memory that could be allocated.
 673   // The algorithm iteratively refines the result by halving the difference
 674   // between these limits, updating either the upper limit (if that value could
 675   // not be allocated) or the lower limit (if the that value could be allocated)
 676   // until the difference between these limits is "small".
 677 
 678   // the minimum amount of memory we care about allocating.
 679   const size_t min_allocation_size = M;
 680 
 681   size_t upper_limit = *limit;
 682 
 683   // first check a few trivial cases
 684   if (is_allocatable(upper_limit) || (upper_limit <= min_allocation_size)) {
 685     *limit = upper_limit;
 686   } else if (!is_allocatable(min_allocation_size)) {
 687     // we found that not even min_allocation_size is allocatable. Return it
 688     // anyway. There is no point to search for a better value any more.
 689     *limit = min_allocation_size;
 690   } else {
 691     // perform the binary search.
 692     size_t lower_limit = min_allocation_size;
 693     while ((upper_limit - lower_limit) > min_allocation_size) {
 694       size_t temp_limit = ((upper_limit - lower_limit) / 2) + lower_limit;
 695       temp_limit = align_down(temp_limit, min_allocation_size);
 696       if (is_allocatable(temp_limit)) {
 697         lower_limit = temp_limit;
 698       } else {
 699         upper_limit = temp_limit;
 700       }
 701     }
 702     *limit = lower_limit;
 703   }
 704   return true;
 705 #endif
 706 }
 707 
 708 void* os::get_default_process_handle() {
 709 #ifdef __APPLE__
 710   // MacOS X needs to use RTLD_FIRST instead of RTLD_LAZY
 711   // to avoid finding unexpected symbols on second (or later)
 712   // loads of a library.
 713   return (void*)::dlopen(nullptr, RTLD_FIRST);
 714 #else
 715   return (void*)::dlopen(nullptr, RTLD_LAZY);
 716 #endif
 717 }
 718 
 719 void* os::dll_lookup(void* handle, const char* name) {
 720   return dlsym(handle, name);
 721 }
 722 
 723 void os::dll_unload(void *lib) {
 724   // os::Linux::dll_path returns a pointer to a string that is owned by the dynamic loader. Upon
 725   // calling dlclose the dynamic loader may free the memory containing the string, thus we need to
 726   // copy the string to be able to reference it after dlclose.
 727   const char* l_path = nullptr;
 728 #ifdef LINUX
 729   char* l_pathdup = nullptr;
 730   l_path = os::Linux::dll_path(lib);
 731   if (l_path != nullptr) {
 732     l_path = l_pathdup = os::strdup(l_path);
 733   }
 734 #endif  // LINUX
 735   if (l_path == nullptr) {
 736     l_path = "<not available>";
 737   }
 738 
 739   char ebuf[1024];
 740   bool res = os::pd_dll_unload(lib, ebuf, sizeof(ebuf));
 741 
 742   if (res) {
 743     Events::log_dll_message(nullptr, "Unloaded shared library \"%s\" [" INTPTR_FORMAT "]",
 744                             l_path, p2i(lib));
 745     log_info(os)("Unloaded shared library \"%s\" [" INTPTR_FORMAT "]", l_path, p2i(lib));
 746   } else {
 747     Events::log_dll_message(nullptr, "Attempt to unload shared library \"%s\" [" INTPTR_FORMAT "] failed, %s",
 748                             l_path, p2i(lib), ebuf);
 749     log_info(os)("Attempt to unload shared library \"%s\" [" INTPTR_FORMAT "] failed, %s",
 750                   l_path, p2i(lib), ebuf);
 751   }
 752   LINUX_ONLY(os::free(l_pathdup));
 753 }
 754 
 755 jlong os::lseek(int fd, jlong offset, int whence) {
 756   return (jlong) ::lseek(fd, offset, whence);
 757 }
 758 
 759 int os::ftruncate(int fd, jlong length) {
 760    return ::ftruncate(fd, length);
 761 }
 762 
 763 const char* os::get_current_directory(char *buf, size_t buflen) {
 764   return getcwd(buf, buflen);
 765 }
 766 
 767 FILE* os::fdopen(int fd, const char* mode) {
 768   return ::fdopen(fd, mode);
 769 }
 770 
 771 ssize_t os::pd_write(int fd, const void *buf, size_t nBytes) {
 772   ssize_t res;
 773   RESTARTABLE(::write(fd, buf, nBytes), res);
 774   return res;
 775 }
 776 
 777 ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
 778   return ::pread(fd, buf, nBytes, offset);
 779 }
 780 
 781 void os::flockfile(FILE* fp) {
 782   ::flockfile(fp);
 783 }
 784 
 785 void os::funlockfile(FILE* fp) {
 786   ::funlockfile(fp);
 787 }
 788 
 789 DIR* os::opendir(const char* dirname) {
 790   assert(dirname != nullptr, "just checking");
 791   return ::opendir(dirname);
 792 }
 793 
 794 struct dirent* os::readdir(DIR* dirp) {
 795   assert(dirp != nullptr, "just checking");
 796   return ::readdir(dirp);
 797 }
 798 
 799 int os::closedir(DIR *dirp) {
 800   assert(dirp != nullptr, "just checking");
 801   return ::closedir(dirp);
 802 }
 803 
 804 int os::socket_close(int fd) {
 805   return ::close(fd);
 806 }
 807 
 808 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
 809   RESTARTABLE_RETURN_INT(::recv(fd, buf, nBytes, flags));
 810 }
 811 
 812 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
 813   RESTARTABLE_RETURN_INT(::send(fd, buf, nBytes, flags));
 814 }
 815 
 816 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
 817   return os::send(fd, buf, nBytes, flags);
 818 }
 819 
 820 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
 821   RESTARTABLE_RETURN_INT(::connect(fd, him, len));
 822 }
 823 
 824 void os::exit(int num) {
 825   ALLOW_C_FUNCTION(::exit, ::exit(num);)
 826 }
 827 
 828 void os::_exit(int num) {
 829   ALLOW_C_FUNCTION(::_exit, ::_exit(num);)
 830 }
 831 
 832 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
 833 // which is used to find statically linked in agents.
 834 // Parameters:
 835 //            sym_name: Symbol in library we are looking for
 836 //            lib_name: Name of library to look in, null for shared libs.
 837 //            is_absolute_path == true if lib_name is absolute path to agent
 838 //                                     such as "/a/b/libL.so"
 839 //            == false if only the base name of the library is passed in
 840 //               such as "L"
 841 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
 842                                     bool is_absolute_path) {
 843   char *agent_entry_name;
 844   size_t len;
 845   size_t name_len;
 846   size_t prefix_len = strlen(JNI_LIB_PREFIX);
 847   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
 848   const char *start;
 849 
 850   if (lib_name != nullptr) {
 851     name_len = strlen(lib_name);
 852     if (is_absolute_path) {
 853       // Need to strip path, prefix and suffix
 854       if ((start = strrchr(lib_name, *os::file_separator())) != nullptr) {
 855         lib_name = ++start;
 856       }
 857       if (strlen(lib_name) <= (prefix_len + suffix_len)) {
 858         return nullptr;
 859       }
 860       lib_name += prefix_len;
 861       name_len = strlen(lib_name) - suffix_len;
 862     }
 863   }
 864   len = (lib_name != nullptr ? name_len : 0) + strlen(sym_name) + 2;
 865   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
 866   if (agent_entry_name == nullptr) {
 867     return nullptr;
 868   }
 869   strcpy(agent_entry_name, sym_name);
 870   if (lib_name != nullptr) {
 871     strcat(agent_entry_name, "_");
 872     strncat(agent_entry_name, lib_name, name_len);
 873   }
 874   return agent_entry_name;
 875 }
 876 
 877 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
 878 void os::infinite_sleep() {
 879   while (true) {    // sleep forever ...
 880     ::sleep(100);   // ... 100 seconds at a time
 881   }
 882 }
 883 
 884 void os::naked_short_nanosleep(jlong ns) {
 885   struct timespec req;
 886   assert(ns > -1 && ns < NANOUNITS, "Un-interruptable sleep, short time use only");
 887   req.tv_sec = 0;
 888   req.tv_nsec = ns;
 889   ::nanosleep(&req, nullptr);
 890   return;
 891 }
 892 
 893 void os::naked_short_sleep(jlong ms) {
 894   assert(ms < MILLIUNITS, "Un-interruptable sleep, short time use only");
 895   os::naked_short_nanosleep(millis_to_nanos(ms));
 896   return;
 897 }
 898 
 899 char* os::Posix::describe_pthread_attr(char* buf, size_t buflen, const pthread_attr_t* attr) {
 900   size_t stack_size = 0;
 901   size_t guard_size = 0;
 902   int detachstate = 0;
 903   pthread_attr_getstacksize(attr, &stack_size);
 904   pthread_attr_getguardsize(attr, &guard_size);
 905   // Work around glibc stack guard issue, see os::create_thread() in os_linux.cpp.
 906   LINUX_ONLY(if (os::Linux::adjustStackSizeForGuardPages()) stack_size -= guard_size;)
 907   pthread_attr_getdetachstate(attr, &detachstate);
 908   jio_snprintf(buf, buflen, "stacksize: " SIZE_FORMAT "k, guardsize: " SIZE_FORMAT "k, %s",
 909     stack_size / K, guard_size / K,
 910     (detachstate == PTHREAD_CREATE_DETACHED ? "detached" : "joinable"));
 911   return buf;
 912 }
 913 
 914 char* os::Posix::realpath(const char* filename, char* outbuf, size_t outbuflen) {
 915 
 916   if (filename == nullptr || outbuf == nullptr || outbuflen < 1) {
 917     assert(false, "os::Posix::realpath: invalid arguments.");
 918     errno = EINVAL;
 919     return nullptr;
 920   }
 921 
 922   char* result = nullptr;
 923 
 924   // This assumes platform realpath() is implemented according to POSIX.1-2008.
 925   // POSIX.1-2008 allows to specify null for the output buffer, in which case
 926   // output buffer is dynamically allocated and must be ::free()'d by the caller.
 927   ALLOW_C_FUNCTION(::realpath, char* p = ::realpath(filename, nullptr);)
 928   if (p != nullptr) {
 929     if (strlen(p) < outbuflen) {
 930       strcpy(outbuf, p);
 931       result = outbuf;
 932     } else {
 933       errno = ENAMETOOLONG;
 934     }
 935     ALLOW_C_FUNCTION(::free, ::free(p);) // *not* os::free
 936   } else {
 937     // Fallback for platforms struggling with modern Posix standards (AIX 5.3, 6.1). If realpath
 938     // returns EINVAL, this may indicate that realpath is not POSIX.1-2008 compatible and
 939     // that it complains about the null we handed down as user buffer.
 940     // In this case, use the user provided buffer but at least check whether realpath caused
 941     // a memory overwrite.
 942     if (errno == EINVAL) {
 943       outbuf[outbuflen - 1] = '\0';
 944       ALLOW_C_FUNCTION(::realpath, p = ::realpath(filename, outbuf);)
 945       if (p != nullptr) {
 946         guarantee(outbuf[outbuflen - 1] == '\0', "realpath buffer overwrite detected.");
 947         result = p;
 948       }
 949     }
 950   }
 951   return result;
 952 
 953 }
 954 
 955 int os::stat(const char *path, struct stat *sbuf) {
 956   return ::stat(path, sbuf);
 957 }
 958 
 959 char * os::native_path(char *path) {
 960   return path;
 961 }
 962 
 963 bool os::same_files(const char* file1, const char* file2) {
 964   if (file1 == nullptr && file2 == nullptr) {
 965     return true;
 966   }
 967 
 968   if (file1 == nullptr || file2 == nullptr) {
 969     return false;
 970   }
 971 
 972   if (strcmp(file1, file2) == 0) {
 973     return true;
 974   }
 975 
 976   bool is_same = false;
 977   struct stat st1;
 978   struct stat st2;
 979 
 980   if (os::stat(file1, &st1) < 0) {
 981     return false;
 982   }
 983 
 984   if (os::stat(file2, &st2) < 0) {
 985     return false;
 986   }
 987 
 988   if (st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino) {
 989     // same files
 990     is_same = true;
 991   }
 992   return is_same;
 993 }
 994 
 995 // Called when creating the thread.  The minimum stack sizes have already been calculated
 996 size_t os::Posix::get_initial_stack_size(ThreadType thr_type, size_t req_stack_size) {
 997   size_t stack_size;
 998   if (req_stack_size == 0) {
 999     stack_size = default_stack_size(thr_type);
1000   } else {
1001     stack_size = req_stack_size;
1002   }
1003 
1004   switch (thr_type) {
1005   case os::java_thread:
1006     // Java threads use ThreadStackSize which default value can be
1007     // changed with the flag -Xss
1008     if (req_stack_size == 0 && JavaThread::stack_size_at_create() > 0) {
1009       // no requested size and we have a more specific default value
1010       stack_size = JavaThread::stack_size_at_create();
1011     }
1012     stack_size = MAX2(stack_size,
1013                       _java_thread_min_stack_allowed);
1014     break;
1015   case os::compiler_thread:
1016     if (req_stack_size == 0 && CompilerThreadStackSize > 0) {
1017       // no requested size and we have a more specific default value
1018       stack_size = (size_t)(CompilerThreadStackSize * K);
1019     }
1020     stack_size = MAX2(stack_size,
1021                       _compiler_thread_min_stack_allowed);
1022     break;
1023   case os::vm_thread:
1024   case os::gc_thread:
1025   case os::watcher_thread:
1026   default:  // presume the unknown thr_type is a VM internal
1027     if (req_stack_size == 0 && VMThreadStackSize > 0) {
1028       // no requested size and we have a more specific default value
1029       stack_size = (size_t)(VMThreadStackSize * K);
1030     }
1031 
1032     stack_size = MAX2(stack_size,
1033                       _vm_internal_thread_min_stack_allowed);
1034     break;
1035   }
1036 
1037   // pthread_attr_setstacksize() may require that the size be rounded up to the OS page size.
1038   // Be careful not to round up to 0. Align down in that case.
1039   if (stack_size <= SIZE_MAX - vm_page_size()) {
1040     stack_size = align_up(stack_size, vm_page_size());
1041   } else {
1042     stack_size = align_down(stack_size, vm_page_size());
1043   }
1044 
1045   return stack_size;
1046 }
1047 
1048 #ifndef ZERO
1049 #ifndef ARM
1050 static bool get_frame_at_stack_banging_point(JavaThread* thread, address pc, const void* ucVoid, frame* fr) {
1051   if (Interpreter::contains(pc)) {
1052     // interpreter performs stack banging after the fixed frame header has
1053     // been generated while the compilers perform it before. To maintain
1054     // semantic consistency between interpreted and compiled frames, the
1055     // method returns the Java sender of the current frame.
1056     *fr = os::fetch_frame_from_context(ucVoid);
1057     if (!fr->is_first_java_frame()) {
1058       // get_frame_at_stack_banging_point() is only called when we
1059       // have well defined stacks so java_sender() calls do not need
1060       // to assert safe_for_sender() first.
1061       *fr = fr->java_sender();
1062     }
1063   } else {
1064     // more complex code with compiled code
1065     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
1066     CodeBlob* cb = CodeCache::find_blob(pc);
1067     if (cb == nullptr || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
1068       // Not sure where the pc points to, fallback to default
1069       // stack overflow handling
1070       return false;
1071     } else {
1072       // in compiled code, the stack banging is performed just after the return pc
1073       // has been pushed on the stack
1074       *fr = os::fetch_compiled_frame_from_context(ucVoid);
1075       if (!fr->is_java_frame()) {
1076         assert(!fr->is_first_frame(), "Safety check");
1077         // See java_sender() comment above.
1078         *fr = fr->java_sender();
1079       }
1080     }
1081   }
1082   assert(fr->is_java_frame(), "Safety check");
1083   return true;
1084 }
1085 #endif // ARM
1086 
1087 // This return true if the signal handler should just continue, ie. return after calling this
1088 bool os::Posix::handle_stack_overflow(JavaThread* thread, address addr, address pc,
1089                                       const void* ucVoid, address* stub) {
1090   // stack overflow
1091   StackOverflow* overflow_state = thread->stack_overflow_state();
1092   if (overflow_state->in_stack_yellow_reserved_zone(addr)) {
1093     if (thread->thread_state() == _thread_in_Java) {
1094 #ifndef ARM
1095       // arm32 doesn't have this
1096       // vthreads don't support this
1097       if (!thread->is_vthread_mounted() && overflow_state->in_stack_reserved_zone(addr)) {
1098         frame fr;
1099         if (get_frame_at_stack_banging_point(thread, pc, ucVoid, &fr)) {
1100           assert(fr.is_java_frame(), "Must be a Java frame");
1101           frame activation =
1102             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
1103           if (activation.sp() != nullptr) {
1104             overflow_state->disable_stack_reserved_zone();
1105             if (activation.is_interpreted_frame()) {
1106               overflow_state->set_reserved_stack_activation((address)(activation.fp()
1107                 // Some platforms use frame pointers for interpreter frames, others use initial sp.
1108 #if !defined(PPC64) && !defined(S390)
1109                 + frame::interpreter_frame_initial_sp_offset
1110 #endif
1111                 ));
1112             } else {
1113               overflow_state->set_reserved_stack_activation((address)activation.unextended_sp());
1114             }
1115             return true; // just continue
1116           }
1117         }
1118       }
1119 #endif // ARM
1120       // Throw a stack overflow exception.  Guard pages will be re-enabled
1121       // while unwinding the stack.
1122       overflow_state->disable_stack_yellow_reserved_zone();
1123       *stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
1124     } else {
1125       // Thread was in the vm or native code.  Return and try to finish.
1126       overflow_state->disable_stack_yellow_reserved_zone();
1127       return true; // just continue
1128     }
1129   } else if (overflow_state->in_stack_red_zone(addr)) {
1130     // Fatal red zone violation. Disable the guard pages and keep
1131     // on handling the signal.
1132     overflow_state->disable_stack_red_zone();
1133     tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
1134 
1135     // This is a likely cause, but hard to verify. Let's just print
1136     // it as a hint.
1137     tty->print_raw_cr("Please check if any of your loaded .so files has "
1138                       "enabled executable stack (see man page execstack(8))");
1139 
1140   } else {
1141 #ifdef LINUX
1142     // This only works with os::Linux::manually_expand_stack()
1143 
1144     // Accessing stack address below sp may cause SEGV if current
1145     // thread has MAP_GROWSDOWN stack. This should only happen when
1146     // current thread was created by user code with MAP_GROWSDOWN flag
1147     // and then attached to VM. See notes in os_linux.cpp.
1148     if (thread->osthread()->expanding_stack() == 0) {
1149        thread->osthread()->set_expanding_stack();
1150        if (os::Linux::manually_expand_stack(thread, addr)) {
1151          thread->osthread()->clear_expanding_stack();
1152          return true; // just continue
1153        }
1154        thread->osthread()->clear_expanding_stack();
1155     } else {
1156        fatal("recursive segv. expanding stack.");
1157     }
1158 #else
1159     tty->print_raw_cr("SIGSEGV happened inside stack but outside yellow and red zone.");
1160 #endif // LINUX
1161   }
1162   return false;
1163 }
1164 #endif // ZERO
1165 
1166 bool os::Posix::is_root(uid_t uid){
1167     return ROOT_UID == uid;
1168 }
1169 
1170 bool os::Posix::matches_effective_uid_or_root(uid_t uid) {
1171     return is_root(uid) || geteuid() == uid;
1172 }
1173 
1174 bool os::Posix::matches_effective_uid_and_gid_or_root(uid_t uid, gid_t gid) {
1175     return is_root(uid) || (geteuid() == uid && getegid() == gid);
1176 }
1177 
1178 // Shared clock/time and other supporting routines for pthread_mutex/cond
1179 // initialization. This is enabled on Solaris but only some of the clock/time
1180 // functionality is actually used there.
1181 
1182 // Shared condattr object for use with relative timed-waits. Will be associated
1183 // with CLOCK_MONOTONIC if available to avoid issues with time-of-day changes,
1184 // but otherwise whatever default is used by the platform - generally the
1185 // time-of-day clock.
1186 static pthread_condattr_t _condAttr[1];
1187 
1188 // Shared mutexattr to explicitly set the type to PTHREAD_MUTEX_NORMAL as not
1189 // all systems (e.g. FreeBSD) map the default to "normal".
1190 static pthread_mutexattr_t _mutexAttr[1];
1191 
1192 // common basic initialization that is always supported
1193 static void pthread_init_common(void) {
1194   int status;
1195   if ((status = pthread_condattr_init(_condAttr)) != 0) {
1196     fatal("pthread_condattr_init: %s", os::strerror(status));
1197   }
1198   if ((status = pthread_mutexattr_init(_mutexAttr)) != 0) {
1199     fatal("pthread_mutexattr_init: %s", os::strerror(status));
1200   }
1201   if ((status = pthread_mutexattr_settype(_mutexAttr, PTHREAD_MUTEX_NORMAL)) != 0) {
1202     fatal("pthread_mutexattr_settype: %s", os::strerror(status));
1203   }
1204   PlatformMutex::init();
1205 }
1206 
1207 static int (*_pthread_condattr_setclock)(pthread_condattr_t *, clockid_t) = nullptr;
1208 
1209 static bool _use_clock_monotonic_condattr = false;
1210 
1211 // Determine what POSIX API's are present and do appropriate
1212 // configuration.
1213 void os::Posix::init(void) {
1214 #if defined(_ALLBSD_SOURCE)
1215   clock_tics_per_sec = CLK_TCK;
1216 #else
1217   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
1218 #endif
1219   // NOTE: no logging available when this is called. Put logging
1220   // statements in init_2().
1221 
1222   // Check for pthread_condattr_setclock support.
1223 
1224   // libpthread is already loaded.
1225   int (*condattr_setclock_func)(pthread_condattr_t*, clockid_t) =
1226     (int (*)(pthread_condattr_t*, clockid_t))dlsym(RTLD_DEFAULT,
1227                                                    "pthread_condattr_setclock");
1228   if (condattr_setclock_func != nullptr) {
1229     _pthread_condattr_setclock = condattr_setclock_func;
1230   }
1231 
1232   // Now do general initialization.
1233 
1234   pthread_init_common();
1235 
1236   int status;
1237   if (_pthread_condattr_setclock != nullptr) {
1238     if ((status = _pthread_condattr_setclock(_condAttr, CLOCK_MONOTONIC)) != 0) {
1239       if (status == EINVAL) {
1240         _use_clock_monotonic_condattr = false;
1241         warning("Unable to use monotonic clock with relative timed-waits" \
1242                 " - changes to the time-of-day clock may have adverse affects");
1243       } else {
1244         fatal("pthread_condattr_setclock: %s", os::strerror(status));
1245       }
1246     } else {
1247       _use_clock_monotonic_condattr = true;
1248     }
1249   }
1250 
1251   initial_time_count = javaTimeNanos();
1252 }
1253 
1254 void os::Posix::init_2(void) {
1255   log_info(os)("Use of CLOCK_MONOTONIC is supported");
1256   log_info(os)("Use of pthread_condattr_setclock is%s supported",
1257                (_pthread_condattr_setclock != nullptr ? "" : " not"));
1258   log_info(os)("Relative timed-wait using pthread_cond_timedwait is associated with %s",
1259                _use_clock_monotonic_condattr ? "CLOCK_MONOTONIC" : "the default clock");
1260 }
1261 
1262 // Utility to convert the given timeout to an absolute timespec
1263 // (based on the appropriate clock) to use with pthread_cond_timewait,
1264 // and sem_timedwait().
1265 // The clock queried here must be the clock used to manage the
1266 // timeout of the condition variable or semaphore.
1267 //
1268 // The passed in timeout value is either a relative time in nanoseconds
1269 // or an absolute time in milliseconds. A relative timeout will be
1270 // associated with CLOCK_MONOTONIC if available, unless the real-time clock
1271 // is explicitly requested; otherwise, or if absolute,
1272 // the default time-of-day clock will be used.
1273 
1274 // Given time is a 64-bit value and the time_t used in the timespec is
1275 // sometimes a signed-32-bit value we have to watch for overflow if times
1276 // way in the future are given. Further on Solaris versions
1277 // prior to 10 there is a restriction (see cond_timedwait) that the specified
1278 // number of seconds, in abstime, is less than current_time + 100000000.
1279 // As it will be over 20 years before "now + 100000000" will overflow we can
1280 // ignore overflow and just impose a hard-limit on seconds using the value
1281 // of "now + 100000000". This places a limit on the timeout of about 3.17
1282 // years from "now".
1283 //
1284 #define MAX_SECS 100000000
1285 
1286 // Calculate a new absolute time that is "timeout" nanoseconds from "now".
1287 // "unit" indicates the unit of "now_part_sec" (may be nanos or micros depending
1288 // on which clock API is being used).
1289 static void calc_rel_time(timespec* abstime, jlong timeout, jlong now_sec,
1290                           jlong now_part_sec, jlong unit) {
1291   time_t max_secs = now_sec + MAX_SECS;
1292 
1293   jlong seconds = timeout / NANOUNITS;
1294   timeout %= NANOUNITS; // remaining nanos
1295 
1296   if (seconds >= MAX_SECS) {
1297     // More seconds than we can add, so pin to max_secs.
1298     abstime->tv_sec = max_secs;
1299     abstime->tv_nsec = 0;
1300   } else {
1301     abstime->tv_sec = now_sec  + seconds;
1302     long nanos = (now_part_sec * (NANOUNITS / unit)) + timeout;
1303     if (nanos >= NANOUNITS) { // overflow
1304       abstime->tv_sec += 1;
1305       nanos -= NANOUNITS;
1306     }
1307     abstime->tv_nsec = nanos;
1308   }
1309 }
1310 
1311 // Unpack the given deadline in milliseconds since the epoch, into the given timespec.
1312 // The current time in seconds is also passed in to enforce an upper bound as discussed above.
1313 static void unpack_abs_time(timespec* abstime, jlong deadline, jlong now_sec) {
1314   time_t max_secs = now_sec + MAX_SECS;
1315 
1316   jlong seconds = deadline / MILLIUNITS;
1317   jlong millis = deadline % MILLIUNITS;
1318 
1319   if (seconds >= max_secs) {
1320     // Absolute seconds exceeds allowed max, so pin to max_secs.
1321     abstime->tv_sec = max_secs;
1322     abstime->tv_nsec = 0;
1323   } else {
1324     abstime->tv_sec = seconds;
1325     abstime->tv_nsec = millis_to_nanos(millis);
1326   }
1327 }
1328 
1329 static jlong millis_to_nanos_bounded(jlong millis) {
1330   // We have to watch for overflow when converting millis to nanos,
1331   // but if millis is that large then we will end up limiting to
1332   // MAX_SECS anyway, so just do that here.
1333   if (millis / MILLIUNITS > MAX_SECS) {
1334     millis = jlong(MAX_SECS) * MILLIUNITS;
1335   }
1336   return millis_to_nanos(millis);
1337 }
1338 
1339 static void to_abstime(timespec* abstime, jlong timeout,
1340                        bool isAbsolute, bool isRealtime) {
1341   DEBUG_ONLY(int max_secs = MAX_SECS;)
1342 
1343   if (timeout < 0) {
1344     timeout = 0;
1345   }
1346 
1347   clockid_t clock = CLOCK_MONOTONIC;
1348   if (isAbsolute || (!_use_clock_monotonic_condattr || isRealtime)) {
1349     clock = CLOCK_REALTIME;
1350   }
1351 
1352   struct timespec now;
1353   int status = clock_gettime(clock, &now);
1354   assert(status == 0, "clock_gettime error: %s", os::strerror(errno));
1355 
1356   if (!isAbsolute) {
1357     calc_rel_time(abstime, timeout, now.tv_sec, now.tv_nsec, NANOUNITS);
1358   } else {
1359     unpack_abs_time(abstime, timeout, now.tv_sec);
1360   }
1361   DEBUG_ONLY(max_secs += now.tv_sec;)
1362 
1363   assert(abstime->tv_sec >= 0, "tv_sec < 0");
1364   assert(abstime->tv_sec <= max_secs, "tv_sec > max_secs");
1365   assert(abstime->tv_nsec >= 0, "tv_nsec < 0");
1366   assert(abstime->tv_nsec < NANOUNITS, "tv_nsec >= NANOUNITS");
1367 }
1368 
1369 // Create an absolute time 'millis' milliseconds in the future, using the
1370 // real-time (time-of-day) clock. Used by PosixSemaphore.
1371 void os::Posix::to_RTC_abstime(timespec* abstime, int64_t millis) {
1372   to_abstime(abstime, millis_to_nanos_bounded(millis),
1373              false /* not absolute */,
1374              true  /* use real-time clock */);
1375 }
1376 
1377 // Common (partly) shared time functions
1378 
1379 jlong os::javaTimeMillis() {
1380   struct timespec ts;
1381   int status = clock_gettime(CLOCK_REALTIME, &ts);
1382   assert(status == 0, "clock_gettime error: %s", os::strerror(errno));
1383   return jlong(ts.tv_sec) * MILLIUNITS +
1384     jlong(ts.tv_nsec) / NANOUNITS_PER_MILLIUNIT;
1385 }
1386 
1387 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1388   struct timespec ts;
1389   int status = clock_gettime(CLOCK_REALTIME, &ts);
1390   assert(status == 0, "clock_gettime error: %s", os::strerror(errno));
1391   seconds = jlong(ts.tv_sec);
1392   nanos = jlong(ts.tv_nsec);
1393 }
1394 
1395 // macOS and AIX have platform specific implementations for javaTimeNanos()
1396 // using native clock/timer access APIs. These have historically worked well
1397 // for those platforms, but it may be possible for them to switch to the
1398 // generic clock_gettime mechanism in the future.
1399 #if !defined(__APPLE__) && !defined(AIX)
1400 
1401 jlong os::javaTimeNanos() {
1402   struct timespec tp;
1403   int status = clock_gettime(CLOCK_MONOTONIC, &tp);
1404   assert(status == 0, "clock_gettime error: %s", os::strerror(errno));
1405   jlong result = jlong(tp.tv_sec) * NANOSECS_PER_SEC + jlong(tp.tv_nsec);
1406   return result;
1407 }
1408 
1409 // for timer info max values which include all bits
1410 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
1411 
1412 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1413   // CLOCK_MONOTONIC - amount of time since some arbitrary point in the past
1414   info_ptr->max_value = ALL_64_BITS;
1415   info_ptr->may_skip_backward = false;      // not subject to resetting or drifting
1416   info_ptr->may_skip_forward = false;       // not subject to resetting or drifting
1417   info_ptr->kind = JVMTI_TIMER_ELAPSED;     // elapsed not CPU time
1418 }
1419 #endif // ! APPLE && !AIX
1420 
1421 // Time since start-up in seconds to a fine granularity.
1422 double os::elapsedTime() {
1423   return ((double)os::elapsed_counter()) / os::elapsed_frequency(); // nanosecond resolution
1424 }
1425 
1426 jlong os::elapsed_counter() {
1427   return os::javaTimeNanos() - initial_time_count;
1428 }
1429 
1430 jlong os::elapsed_frequency() {
1431   return NANOSECS_PER_SEC; // nanosecond resolution
1432 }
1433 
1434 bool os::supports_vtime() { return true; }
1435 
1436 // Return the real, user, and system times in seconds from an
1437 // arbitrary fixed point in the past.
1438 bool os::getTimesSecs(double* process_real_time,
1439                       double* process_user_time,
1440                       double* process_system_time) {
1441   struct tms ticks;
1442   clock_t real_ticks = times(&ticks);
1443 
1444   if (real_ticks == (clock_t) (-1)) {
1445     return false;
1446   } else {
1447     double ticks_per_second = (double) clock_tics_per_sec;
1448     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1449     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1450     *process_real_time = ((double) real_ticks) / ticks_per_second;
1451 
1452     return true;
1453   }
1454 }
1455 
1456 char * os::local_time_string(char *buf, size_t buflen) {
1457   struct tm t;
1458   time_t long_time;
1459   time(&long_time);
1460   localtime_r(&long_time, &t);
1461   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1462                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1463                t.tm_hour, t.tm_min, t.tm_sec);
1464   return buf;
1465 }
1466 
1467 struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
1468   return localtime_r(clock, res);
1469 }
1470 
1471 // PlatformEvent
1472 //
1473 // Assumption:
1474 //    Only one parker can exist on an event, which is why we allocate
1475 //    them per-thread. Multiple unparkers can coexist.
1476 //
1477 // _event serves as a restricted-range semaphore.
1478 //   -1 : thread is blocked, i.e. there is a waiter
1479 //    0 : neutral: thread is running or ready,
1480 //        could have been signaled after a wait started
1481 //    1 : signaled - thread is running or ready
1482 //
1483 //    Having three states allows for some detection of bad usage - see
1484 //    comments on unpark().
1485 
1486 PlatformEvent::PlatformEvent() {
1487   int status = pthread_cond_init(_cond, _condAttr);
1488   assert_status(status == 0, status, "cond_init");
1489   status = pthread_mutex_init(_mutex, _mutexAttr);
1490   assert_status(status == 0, status, "mutex_init");
1491   _event   = 0;
1492   _nParked = 0;
1493 }
1494 
1495 void PlatformEvent::park() {       // AKA "down()"
1496   // Transitions for _event:
1497   //   -1 => -1 : illegal
1498   //    1 =>  0 : pass - return immediately
1499   //    0 => -1 : block; then set _event to 0 before returning
1500 
1501   // Invariant: Only the thread associated with the PlatformEvent
1502   // may call park().
1503   assert(_nParked == 0, "invariant");
1504 
1505   int v;
1506 
1507   // atomically decrement _event
1508   for (;;) {
1509     v = _event;
1510     if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
1511   }
1512   guarantee(v >= 0, "invariant");
1513 
1514   if (v == 0) { // Do this the hard way by blocking ...
1515     int status = pthread_mutex_lock(_mutex);
1516     assert_status(status == 0, status, "mutex_lock");
1517     guarantee(_nParked == 0, "invariant");
1518     ++_nParked;
1519     while (_event < 0) {
1520       // OS-level "spurious wakeups" are ignored
1521       status = pthread_cond_wait(_cond, _mutex);
1522       assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT),
1523                     status, "cond_wait");
1524     }
1525     --_nParked;
1526 
1527     _event = 0;
1528     status = pthread_mutex_unlock(_mutex);
1529     assert_status(status == 0, status, "mutex_unlock");
1530     // Paranoia to ensure our locked and lock-free paths interact
1531     // correctly with each other.
1532     OrderAccess::fence();
1533   }
1534   guarantee(_event >= 0, "invariant");
1535 }
1536 
1537 int PlatformEvent::park(jlong millis) {
1538   return park_nanos(millis_to_nanos_bounded(millis));
1539 }
1540 
1541 int PlatformEvent::park_nanos(jlong nanos) {
1542   assert(nanos > 0, "nanos are positive");
1543 
1544   // Transitions for _event:
1545   //   -1 => -1 : illegal
1546   //    1 =>  0 : pass - return immediately
1547   //    0 => -1 : block; then set _event to 0 before returning
1548 
1549   // Invariant: Only the thread associated with the Event/PlatformEvent
1550   // may call park().
1551   assert(_nParked == 0, "invariant");
1552 
1553   int v;
1554   // atomically decrement _event
1555   for (;;) {
1556     v = _event;
1557     if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
1558   }
1559   guarantee(v >= 0, "invariant");
1560 
1561   if (v == 0) { // Do this the hard way by blocking ...
1562     struct timespec abst;
1563     to_abstime(&abst, nanos, false, false);
1564 
1565     int ret = OS_TIMEOUT;
1566     int status = pthread_mutex_lock(_mutex);
1567     assert_status(status == 0, status, "mutex_lock");
1568     guarantee(_nParked == 0, "invariant");
1569     ++_nParked;
1570 
1571     while (_event < 0) {
1572       status = pthread_cond_timedwait(_cond, _mutex, &abst);
1573       assert_status(status == 0 || status == ETIMEDOUT,
1574                     status, "cond_timedwait");
1575       // OS-level "spurious wakeups" are ignored
1576       if (status == ETIMEDOUT) break;
1577     }
1578     --_nParked;
1579 
1580     if (_event >= 0) {
1581       ret = OS_OK;
1582     }
1583 
1584     _event = 0;
1585     status = pthread_mutex_unlock(_mutex);
1586     assert_status(status == 0, status, "mutex_unlock");
1587     // Paranoia to ensure our locked and lock-free paths interact
1588     // correctly with each other.
1589     OrderAccess::fence();
1590     return ret;
1591   }
1592   return OS_OK;
1593 }
1594 
1595 void PlatformEvent::unpark() {
1596   // Transitions for _event:
1597   //    0 => 1 : just return
1598   //    1 => 1 : just return
1599   //   -1 => either 0 or 1; must signal target thread
1600   //         That is, we can safely transition _event from -1 to either
1601   //         0 or 1.
1602   // See also: "Semaphores in Plan 9" by Mullender & Cox
1603   //
1604   // Note: Forcing a transition from "-1" to "1" on an unpark() means
1605   // that it will take two back-to-back park() calls for the owning
1606   // thread to block. This has the benefit of forcing a spurious return
1607   // from the first park() call after an unpark() call which will help
1608   // shake out uses of park() and unpark() without checking state conditions
1609   // properly. This spurious return doesn't manifest itself in any user code
1610   // but only in the correctly written condition checking loops of ObjectMonitor,
1611   // Mutex/Monitor, and JavaThread::sleep
1612 
1613   if (Atomic::xchg(&_event, 1) >= 0) return;
1614 
1615   int status = pthread_mutex_lock(_mutex);
1616   assert_status(status == 0, status, "mutex_lock");
1617   int anyWaiters = _nParked;
1618   assert(anyWaiters == 0 || anyWaiters == 1, "invariant");
1619   status = pthread_mutex_unlock(_mutex);
1620   assert_status(status == 0, status, "mutex_unlock");
1621 
1622   // Note that we signal() *after* dropping the lock for "immortal" Events.
1623   // This is safe and avoids a common class of futile wakeups.  In rare
1624   // circumstances this can cause a thread to return prematurely from
1625   // cond_{timed}wait() but the spurious wakeup is benign and the victim
1626   // will simply re-test the condition and re-park itself.
1627   // This provides particular benefit if the underlying platform does not
1628   // provide wait morphing.
1629 
1630   if (anyWaiters != 0) {
1631     status = pthread_cond_signal(_cond);
1632     assert_status(status == 0, status, "cond_signal");
1633   }
1634 }
1635 
1636 // JSR166 support
1637 
1638  PlatformParker::PlatformParker() : _counter(0), _cur_index(-1) {
1639   int status = pthread_cond_init(&_cond[REL_INDEX], _condAttr);
1640   assert_status(status == 0, status, "cond_init rel");
1641   status = pthread_cond_init(&_cond[ABS_INDEX], nullptr);
1642   assert_status(status == 0, status, "cond_init abs");
1643   status = pthread_mutex_init(_mutex, _mutexAttr);
1644   assert_status(status == 0, status, "mutex_init");
1645 }
1646 
1647 PlatformParker::~PlatformParker() {
1648   int status = pthread_cond_destroy(&_cond[REL_INDEX]);
1649   assert_status(status == 0, status, "cond_destroy rel");
1650   status = pthread_cond_destroy(&_cond[ABS_INDEX]);
1651   assert_status(status == 0, status, "cond_destroy abs");
1652   status = pthread_mutex_destroy(_mutex);
1653   assert_status(status == 0, status, "mutex_destroy");
1654 }
1655 
1656 // Parker::park decrements count if > 0, else does a condvar wait.  Unpark
1657 // sets count to 1 and signals condvar.  Only one thread ever waits
1658 // on the condvar. Contention seen when trying to park implies that someone
1659 // is unparking you, so don't wait. And spurious returns are fine, so there
1660 // is no need to track notifications.
1661 
1662 void Parker::park(bool isAbsolute, jlong time) {
1663 
1664   // Optional fast-path check:
1665   // Return immediately if a permit is available.
1666   // We depend on Atomic::xchg() having full barrier semantics
1667   // since we are doing a lock-free update to _counter.
1668   if (Atomic::xchg(&_counter, 0) > 0) return;
1669 
1670   JavaThread *jt = JavaThread::current();
1671 
1672   // Optional optimization -- avoid state transitions if there's
1673   // an interrupt pending.
1674   if (jt->is_interrupted(false)) {
1675     return;
1676   }
1677 
1678   // Next, demultiplex/decode time arguments
1679   struct timespec absTime;
1680   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
1681     return;
1682   }
1683   if (time > 0) {
1684     to_abstime(&absTime, time, isAbsolute, false);
1685   }
1686 
1687   // Enter safepoint region
1688   // Beware of deadlocks such as 6317397.
1689   // The per-thread Parker:: mutex is a classic leaf-lock.
1690   // In particular a thread must never block on the Threads_lock while
1691   // holding the Parker:: mutex.  If safepoints are pending both the
1692   // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
1693   ThreadBlockInVM tbivm(jt);
1694 
1695   // Can't access interrupt state now that we are _thread_blocked. If we've
1696   // been interrupted since we checked above then _counter will be > 0.
1697 
1698   // Don't wait if cannot get lock since interference arises from
1699   // unparking.
1700   if (pthread_mutex_trylock(_mutex) != 0) {
1701     return;
1702   }
1703 
1704   int status;
1705   if (_counter > 0)  { // no wait needed
1706     _counter = 0;
1707     status = pthread_mutex_unlock(_mutex);
1708     assert_status(status == 0, status, "invariant");
1709     // Paranoia to ensure our locked and lock-free paths interact
1710     // correctly with each other and Java-level accesses.
1711     OrderAccess::fence();
1712     return;
1713   }
1714 
1715   OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
1716 
1717   assert(_cur_index == -1, "invariant");
1718   if (time == 0) {
1719     _cur_index = REL_INDEX; // arbitrary choice when not timed
1720     status = pthread_cond_wait(&_cond[_cur_index], _mutex);
1721     assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT),
1722                   status, "cond_wait");
1723   }
1724   else {
1725     _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
1726     status = pthread_cond_timedwait(&_cond[_cur_index], _mutex, &absTime);
1727     assert_status(status == 0 || status == ETIMEDOUT,
1728                   status, "cond_timedwait");
1729   }
1730   _cur_index = -1;
1731 
1732   _counter = 0;
1733   status = pthread_mutex_unlock(_mutex);
1734   assert_status(status == 0, status, "invariant");
1735   // Paranoia to ensure our locked and lock-free paths interact
1736   // correctly with each other and Java-level accesses.
1737   OrderAccess::fence();
1738 }
1739 
1740 void Parker::unpark() {
1741   int status = pthread_mutex_lock(_mutex);
1742   assert_status(status == 0, status, "invariant");
1743   const int s = _counter;
1744   _counter = 1;
1745   // must capture correct index before unlocking
1746   int index = _cur_index;
1747   status = pthread_mutex_unlock(_mutex);
1748   assert_status(status == 0, status, "invariant");
1749 
1750   // Note that we signal() *after* dropping the lock for "immortal" Events.
1751   // This is safe and avoids a common class of futile wakeups.  In rare
1752   // circumstances this can cause a thread to return prematurely from
1753   // cond_{timed}wait() but the spurious wakeup is benign and the victim
1754   // will simply re-test the condition and re-park itself.
1755   // This provides particular benefit if the underlying platform does not
1756   // provide wait morphing.
1757 
1758   if (s < 1 && index != -1) {
1759     // thread is definitely parked
1760     status = pthread_cond_signal(&_cond[index]);
1761     assert_status(status == 0, status, "invariant");
1762   }
1763 }
1764 
1765 // Platform Mutex/Monitor implementation
1766 
1767 #if PLATFORM_MONITOR_IMPL_INDIRECT
1768 
1769 PlatformMutex::Mutex::Mutex() : _next(nullptr) {
1770   int status = pthread_mutex_init(&_mutex, _mutexAttr);
1771   assert_status(status == 0, status, "mutex_init");
1772 }
1773 
1774 PlatformMutex::Mutex::~Mutex() {
1775   int status = pthread_mutex_destroy(&_mutex);
1776   assert_status(status == 0, status, "mutex_destroy");
1777 }
1778 
1779 pthread_mutex_t PlatformMutex::_freelist_lock;
1780 PlatformMutex::Mutex* PlatformMutex::_mutex_freelist = nullptr;
1781 
1782 void PlatformMutex::init() {
1783   int status = pthread_mutex_init(&_freelist_lock, _mutexAttr);
1784   assert_status(status == 0, status, "freelist lock init");
1785 }
1786 
1787 struct PlatformMutex::WithFreeListLocked : public StackObj {
1788   WithFreeListLocked() {
1789     int status = pthread_mutex_lock(&_freelist_lock);
1790     assert_status(status == 0, status, "freelist lock");
1791   }
1792 
1793   ~WithFreeListLocked() {
1794     int status = pthread_mutex_unlock(&_freelist_lock);
1795     assert_status(status == 0, status, "freelist unlock");
1796   }
1797 };
1798 
1799 PlatformMutex::PlatformMutex() {
1800   {
1801     WithFreeListLocked wfl;
1802     _impl = _mutex_freelist;
1803     if (_impl != nullptr) {
1804       _mutex_freelist = _impl->_next;
1805       _impl->_next = nullptr;
1806       return;
1807     }
1808   }
1809   _impl = new Mutex();
1810 }
1811 
1812 PlatformMutex::~PlatformMutex() {
1813   WithFreeListLocked wfl;
1814   assert(_impl->_next == nullptr, "invariant");
1815   _impl->_next = _mutex_freelist;
1816   _mutex_freelist = _impl;
1817 }
1818 
1819 PlatformMonitor::Cond::Cond() : _next(nullptr) {
1820   int status = pthread_cond_init(&_cond, _condAttr);
1821   assert_status(status == 0, status, "cond_init");
1822 }
1823 
1824 PlatformMonitor::Cond::~Cond() {
1825   int status = pthread_cond_destroy(&_cond);
1826   assert_status(status == 0, status, "cond_destroy");
1827 }
1828 
1829 PlatformMonitor::Cond* PlatformMonitor::_cond_freelist = nullptr;
1830 
1831 PlatformMonitor::PlatformMonitor() {
1832   {
1833     WithFreeListLocked wfl;
1834     _impl = _cond_freelist;
1835     if (_impl != nullptr) {
1836       _cond_freelist = _impl->_next;
1837       _impl->_next = nullptr;
1838       return;
1839     }
1840   }
1841   _impl = new Cond();
1842 }
1843 
1844 PlatformMonitor::~PlatformMonitor() {
1845   WithFreeListLocked wfl;
1846   assert(_impl->_next == nullptr, "invariant");
1847   _impl->_next = _cond_freelist;
1848   _cond_freelist = _impl;
1849 }
1850 
1851 #else
1852 
1853 PlatformMutex::PlatformMutex() {
1854   int status = pthread_mutex_init(&_mutex, _mutexAttr);
1855   assert_status(status == 0, status, "mutex_init");
1856 }
1857 
1858 PlatformMutex::~PlatformMutex() {
1859   int status = pthread_mutex_destroy(&_mutex);
1860   assert_status(status == 0, status, "mutex_destroy");
1861 }
1862 
1863 PlatformMonitor::PlatformMonitor() {
1864   int status = pthread_cond_init(&_cond, _condAttr);
1865   assert_status(status == 0, status, "cond_init");
1866 }
1867 
1868 PlatformMonitor::~PlatformMonitor() {
1869   int status = pthread_cond_destroy(&_cond);
1870   assert_status(status == 0, status, "cond_destroy");
1871 }
1872 
1873 #endif // PLATFORM_MONITOR_IMPL_INDIRECT
1874 
1875 // Must already be locked
1876 int PlatformMonitor::wait(uint64_t millis) {
1877   if (millis > 0) {
1878     struct timespec abst;
1879     // We have to watch for overflow when converting millis to nanos,
1880     // but if millis is that large then we will end up limiting to
1881     // MAX_SECS anyway, so just do that here. This also handles values
1882     // larger than int64_t max.
1883     if (millis / MILLIUNITS > MAX_SECS) {
1884       millis = uint64_t(MAX_SECS) * MILLIUNITS;
1885     }
1886     to_abstime(&abst, millis_to_nanos(int64_t(millis)), false, false);
1887 
1888     int ret = OS_TIMEOUT;
1889     int status = pthread_cond_timedwait(cond(), mutex(), &abst);
1890     assert_status(status == 0 || status == ETIMEDOUT,
1891                   status, "cond_timedwait");
1892     if (status == 0) {
1893       ret = OS_OK;
1894     }
1895     return ret;
1896   } else {
1897     int status = pthread_cond_wait(cond(), mutex());
1898     assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT),
1899                   status, "cond_wait");
1900     return OS_OK;
1901   }
1902 }
1903 
1904 // Darwin has no "environ" in a dynamic library.
1905 #ifdef __APPLE__
1906   #define environ (*_NSGetEnviron())
1907 #else
1908   extern char** environ;
1909 #endif
1910 
1911 char** os::get_environ() { return environ; }
1912 
1913 // Run the specified command in a separate process. Return its exit value,
1914 // or -1 on failure (e.g. can't fork a new process).
1915 // Notes: -Unlike system(), this function can be called from signal handler. It
1916 //         doesn't block SIGINT et al.
1917 //        -this function is unsafe to use in non-error situations, mainly
1918 //         because the child process will inherit all parent descriptors.
1919 int os::fork_and_exec(const char* cmd) {
1920   const char* argv[4] = {"sh", "-c", cmd, nullptr};
1921   pid_t pid = -1;
1922   char** env = os::get_environ();
1923   // Note: cast is needed because posix_spawn() requires - for compatibility with ancient
1924   // C-code - a non-const argv/envp pointer array. But it is fine to hand in literal
1925   // strings and just cast the constness away. See also ProcessImpl_md.c.
1926   int rc = ::posix_spawn(&pid, "/bin/sh", nullptr, nullptr, (char**) argv, env);
1927   if (rc == 0) {
1928     int status;
1929     // Wait for the child process to exit.  This returns immediately if
1930     // the child has already exited. */
1931     while (::waitpid(pid, &status, 0) < 0) {
1932       switch (errno) {
1933       case ECHILD: return 0;
1934       case EINTR: break;
1935       default: return -1;
1936       }
1937     }
1938     if (WIFEXITED(status)) {
1939       // The child exited normally; get its exit code.
1940       return WEXITSTATUS(status);
1941     } else if (WIFSIGNALED(status)) {
1942       // The child exited because of a signal
1943       // The best value to return is 0x80 + signal number,
1944       // because that is what all Unix shells do, and because
1945       // it allows callers to distinguish between process exit and
1946       // process death by signal.
1947       return 0x80 + WTERMSIG(status);
1948     } else {
1949       // Unknown exit code; pass it through
1950       return status;
1951     }
1952   } else {
1953     // Don't log, we are inside error handling
1954     return -1;
1955   }
1956 }
1957 
1958 bool os::message_box(const char* title, const char* message) {
1959   int i;
1960   fdStream err(defaultStream::error_fd());
1961   for (i = 0; i < 78; i++) err.print_raw("=");
1962   err.cr();
1963   err.print_raw_cr(title);
1964   for (i = 0; i < 78; i++) err.print_raw("-");
1965   err.cr();
1966   err.print_raw_cr(message);
1967   for (i = 0; i < 78; i++) err.print_raw("=");
1968   err.cr();
1969 
1970   char buf[16];
1971   // Prevent process from exiting upon "read error" without consuming all CPU
1972   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
1973 
1974   return buf[0] == 'y' || buf[0] == 'Y';
1975 }
1976 
1977 ////////////////////////////////////////////////////////////////////////////////
1978 // runtime exit support
1979 
1980 // Note: os::shutdown() might be called very early during initialization, or
1981 // called from signal handler. Before adding something to os::shutdown(), make
1982 // sure it is async-safe and can handle partially initialized VM.
1983 void os::shutdown() {
1984 
1985   // allow PerfMemory to attempt cleanup of any persistent resources
1986   perfMemory_exit();
1987 
1988   // needs to remove object in file system
1989   AttachListener::abort();
1990 
1991   // flush buffered output, finish log files
1992   ostream_abort();
1993 
1994   // Check for abort hook
1995   abort_hook_t abort_hook = Arguments::abort_hook();
1996   if (abort_hook != nullptr) {
1997     abort_hook();
1998   }
1999 
2000 }
2001 
2002 // Note: os::abort() might be called very early during initialization, or
2003 // called from signal handler. Before adding something to os::abort(), make
2004 // sure it is async-safe and can handle partially initialized VM.
2005 // Also note we can abort while other threads continue to run, so we can
2006 // easily trigger secondary faults in those threads. To reduce the likelihood
2007 // of that we use _exit rather than exit, so that no atexit hooks get run.
2008 // But note that os::shutdown() could also trigger secondary faults.
2009 void os::abort(bool dump_core, void* siginfo, const void* context) {
2010   os::shutdown();
2011   if (dump_core) {
2012     LINUX_ONLY(if (DumpPrivateMappingsInCore) ClassLoader::close_jrt_image();)
2013     ::abort(); // dump core
2014   }
2015   os::_exit(1);
2016 }
2017 
2018 // Die immediately, no exit hook, no abort hook, no cleanup.
2019 // Dump a core file, if possible, for debugging.
2020 void os::die() {
2021   if (TestUnresponsiveErrorHandler && !CreateCoredumpOnCrash) {
2022     // For TimeoutInErrorHandlingTest.java, we just kill the VM
2023     // and don't take the time to generate a core file.
2024     ::raise(SIGKILL);
2025     // ::raise is not noreturn, even though with SIGKILL it definitely won't
2026     // return.  Hence "fall through" to ::abort, which is declared noreturn.
2027   }
2028   ::abort();
2029 }
2030 
2031 const char* os::file_separator() { return "/"; }
2032 const char* os::line_separator() { return "\n"; }
2033 const char* os::path_separator() { return ":"; }